From 81d74cd8caff78e003a53a57483d4cf29eb7b528 Mon Sep 17 00:00:00 2001
From: Zeeshan Lakhani
Date: Fri, 9 Oct 2015 17:52:36 -0400
Subject: [PATCH 01/11] yokozuna_rt additions for types and better testing w/
 allow-mult=true extractors test

(cherry picked from commit 844ee9cdb20bd2f476d28fe7ef80967e23d09046)
---
 src/yokozuna_rt.erl     | 26 ++++++++++++++++++++------
 tests/yz_extractors.erl | 21 ++++++++++++++-------
 tests/yz_handoff.erl    |  3 +--
 3 files changed, 35 insertions(+), 15 deletions(-)

diff --git a/src/yokozuna_rt.erl b/src/yokozuna_rt.erl
index 7723d3e5f..3ddaee5ab 100644
--- a/src/yokozuna_rt.erl
+++ b/src/yokozuna_rt.erl
@@ -23,6 +23,7 @@
 -include("yokozuna_rt.hrl").

 -export([check_exists/2,
+         clear_trees/1,
          commit/2,
          expire_trees/1,
          gen_keys/1,
@@ -225,6 +226,15 @@ expire_trees(Cluster) ->
     timer:sleep(100),
     ok.

+%% @doc Clear YZ trees
+-spec clear_trees([node()]) -> ok.
+clear_trees(Cluster) ->
+    lager:info("Clear all trees"),
+    _ = [ok = rpc:call(Node, yz_entropy_mgr, clear_trees, [])
+         || Node <- Cluster],
+    ok.
+
+
 %% @doc Remove index directories, removing the index.
 -spec remove_index_dirs([node()], index_name()) -> ok.
 remove_index_dirs(Nodes, IndexName) ->
@@ -364,20 +374,24 @@ create_and_set_index(Cluster, Pid, Bucket, Index) ->
     ok = riakc_pb_socket:create_search_index(Pid, Index),
     %% For possible legacy upgrade reasons, wrap create index in a wait
     wait_for_index(Cluster, Index),
-    set_index(Pid, Bucket, Index).
+    set_index(Pid, hd(Cluster), Bucket, Index).

 -spec create_and_set_index([node()], pid(), bucket(), index_name(),
                            schema_name()) -> ok.
 create_and_set_index(Cluster, Pid, Bucket, Index, Schema) ->
     %% Create a search index and associate with a bucket
     lager:info("Create a search index ~s with a custom schema named ~s and " ++
-               "associate it with bucket ~s", [Index, Schema, Bucket]),
+               "associate it with bucket ~p", [Index, Schema, Bucket]),
     ok = riakc_pb_socket:create_search_index(Pid, Index, Schema, []),
     %% For possible legacy upgrade reasons, wrap create index in a wait
     wait_for_index(Cluster, Index),
-    set_index(Pid, Bucket, Index).
-
--spec set_index(pid(), bucket(), index_name()) -> ok.
-set_index(Pid, Bucket, Index) ->
+    set_index(Pid, hd(Cluster), Bucket, Index).
+
+-spec set_index(pid(), node(), bucket(), index_name()) -> ok.
+set_index(_Pid, Node, {BucketType, _Bucket}, Index) ->
+    lager:info("Create and activate map-based bucket type ~s and tie it to search_index ~s",
+               [BucketType, Index]),
+    rt:create_and_activate_bucket_type(Node, BucketType, [{search_index, Index}]);
+set_index(Pid, _Node, Bucket, Index) ->
     ok = riakc_pb_socket:set_search_index(Pid, Bucket, Index).

 internal_solr_url(Host, Port, Index) ->
diff --git a/tests/yz_extractors.erl b/tests/yz_extractors.erl
index 84f7d8b5e..33d6ee8e9 100644
--- a/tests/yz_extractors.erl
+++ b/tests/yz_extractors.erl
@@ -28,10 +28,12 @@
 -include_lib("riakc/include/riakc.hrl").

 -define(FMT(S, Args), lists:flatten(io_lib:format(S, Args))).
+-define(TYPE1, <<"extractors_in_paradise">>).
+-define(TYPE2, <<"extractors_in_paradiso">>).
 -define(INDEX1, <<"test_idx1">>).
--define(BUCKET1, <<"test_bkt1">>).
+-define(BUCKET1, {?TYPE1, <<"test_bkt1">>}).
 -define(INDEX2, <<"test_idx2">>).
--define(BUCKET2, <<"test_bkt2">>).
+-define(BUCKET2, {?TYPE2, <<"test_bkt2">>}).
 -define(SCHEMANAME, <<"test">>).
 -define(TEST_SCHEMA,
 <<"
@@ -278,9 +280,9 @@ get_map(Node) ->
 verify_extractor(Node, PacketData, Mod) ->
     rpc:call(Node, yz_extractor, run, [PacketData, Mod]).

-bucket_url({Host,Port}, BName, Key) ->
-    ?FMT("http://~s:~B/buckets/~s/keys/~s",
-         [Host, Port, BName, Key]).
+bucket_url({Host,Port}, {BType, BName}, Key) ->
+    ?FMT("http://~s:~B/types/~s/buckets/~s/keys/~s",
+         [Host, Port, BType, BName, Key]).

 test_extractor_works(Cluster, Packet) ->
     [rt_intercept:add(ANode, {yz_noop_extractor,
@@ -304,7 +306,7 @@ test_extractor_with_aae_expire(Cluster, Index, Bucket, Packet) ->
     {Host, Port} = rt:select_random(yokozuna_rt:host_entries(
                                       rt:connection_info(
                                         Cluster))),
-    URL = bucket_url({Host, Port}, mochiweb_util:quote_plus(Bucket),
+    URL = bucket_url({Host, Port}, Bucket,
                      mochiweb_util:quote_plus(Key)),

     CT = ?EXTRACTOR_CT,
@@ -326,8 +328,13 @@ test_extractor_with_aae_expire(Cluster, Index, Bucket, Packet) ->
     yokozuna_rt:override_schema(APid, Cluster, Index, ?SCHEMANAME,
                                 ?TEST_SCHEMA_UPGRADE),

+    {ok, "200", RHeaders, _} = ibrowse:send_req(URL, [{"Content-Type", CT}], get,
+                                                [], []),
+    VC = proplists:get_value("X-Riak-Vclock", RHeaders),
+
     {ok, "204", _, _} = ibrowse:send_req(
-                          URL, [{"Content-Type", CT}], put, Packet),
+                          URL, [{"Content-Type", CT}, {"X-Riak-Vclock", VC}],
+                          put, Packet),
     yokozuna_rt:commit(Cluster, Index),

     yokozuna_rt:search_expect({Host, Port}, Index, <<"method">>,
diff --git a/tests/yz_handoff.erl b/tests/yz_handoff.erl
index ab91d3bdb..4c5b1af6d 100644
--- a/tests/yz_handoff.erl
+++ b/tests/yz_handoff.erl
@@ -99,8 +99,7 @@ confirm() ->
                          join_node = Node1,
                          admin_node = Node2}],

-    %% Run Shell Script to count/test # of replicas and leave/join
-    %% nodes from the cluster
+    %% Run set of leave/join trials and count/test #'s from the cluster
     [[begin
          check_data(Nodes, KeyCount, BucketURL, SearchURL, State),
          check_counts(Pid, KeyCount, BucketURL)

From a255cc153905b6d6e71df9f892f707788afa0fcf Mon Sep 17 00:00:00 2001
From: Brett Hazen
Date: Tue, 10 Nov 2015 12:11:27 -0800
Subject: [PATCH 02/11] Add debugging to rt:do_commit/1 to track
 nothing_planned
---
 src/rt.erl | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/rt.erl b/src/rt.erl
index 96f31e2c1..829eefac5 100644
--- a/src/rt.erl
+++ b/src/rt.erl
@@ -449,7 +449,7 @@ staged_join(Node, PNode) ->

 plan_and_commit(Node) ->
     timer:sleep(500),
-    lager:info("planning and commiting cluster join"),
+    lager:info("planning cluster join"),
     case rpc:call(Node, riak_core_claimant, plan, []) of
         {error, ring_not_ready} ->
             lager:info("plan: ring not ready"),
@@ -461,6 +461,7 @@ plan_and_commit(Node) ->
     end.

 do_commit(Node) ->
+    lager:info("planning cluster commit"),
     case rpc:call(Node, riak_core_claimant, commit, []) of
         {error, plan_changed} ->
             lager:info("commit: plan changed"),
@@ -472,8 +473,9 @@ do_commit(Node) ->
             timer:sleep(100),
             maybe_wait_for_changes(Node),
             do_commit(Node);
-        {error,nothing_planned} ->
+        {error, nothing_planned} ->
             %% Assume plan actually committed somehow
+            lager:info("commit: nothing planned"),
             ok;
         ok ->
             ok

From dd033f974623ca436bdadd68669f26eef21e23bd Mon Sep 17 00:00:00 2001
From: Zeeshan Lakhani
Date: Wed, 18 Nov 2015 12:50:14 -0500
Subject: [PATCH 03/11] shore up possible race between upgrade and first check

(cherry picked from commit 867dc1a0364e8a19e1a929dd9f3385f46703cfc4)
---
 tests/yz_extractors.erl | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/tests/yz_extractors.erl b/tests/yz_extractors.erl
index 33d6ee8e9..cf596c43c 100644
--- a/tests/yz_extractors.erl
+++ b/tests/yz_extractors.erl
@@ -207,6 +207,8 @@ confirm() ->
     %% Upgrade
     yokozuna_rt:rolling_upgrade(Cluster, current),

+    [rt:wait_until_ready(ANode) || ANode <- Cluster],
+
     [rt:assert_capability(ANode, ?YZ_CAP, true) || ANode <- Cluster],
     [rt:assert_supported(rt:capability(ANode, all), ?YZ_CAP, [true, false]) ||
        ANode <- Cluster],

From 9551318edd3a94ea0a9a806bd8f69d1573fe3940 Mon Sep 17 00:00:00 2001
From: Doug Rohrer
Date: Mon, 30 Nov 2015 19:18:59 -0500
Subject: [PATCH 04/11] Update proxy_overload_recovery to handle indeterminism
 more gracefully.

- Update `prepare` so that it waits for the vnode PID to change, as it's
  possible that monitors haven't fired by the time we ask for a new one.
- Update `drain` to require 3 consecutive 0 counts from a group of PIDs
  rather than draining individual processes or only trying once. The
  number 3 was determined empirically - read the comment above `drain`
  if it continues to cause issues.
- Rewrite `resume` to make sure things are drained properly before
  continuing.
---
 tests/proxy_overload_recovery.erl | 74 ++++++++++++++++++++++---------
 1 file changed, 52 insertions(+), 22 deletions(-)

diff --git a/tests/proxy_overload_recovery.erl b/tests/proxy_overload_recovery.erl
index 111320422..03248d008 100644
--- a/tests/proxy_overload_recovery.erl
+++ b/tests/proxy_overload_recovery.erl
@@ -208,13 +208,14 @@ prepare(ThresholdSeed) ->
     {ok, VPid0} = riak_core_vnode_manager:get_vnode_pid(Id, riak_kv_vnode),
     sys:resume(VPid0),
     ok = supervisor:terminate_child(riak_core_vnode_sup, VPid0),
-    false = is_process_alive(VPid0),
     %% Reset the proxy pid to make sure it resets state and picks up the new
     %% environment variables
     ok = supervisor:terminate_child(riak_core_vnode_proxy_sup,
                                     {riak_kv_vnode, Id}),
     RegName = riak_core_vnode_proxy:reg_name(riak_kv_vnode, Index),
     undefined = whereis(RegName),
+    VPid1 = wait_for_vnode_change(VPid0, Index),
+
     {ok, PPid} = supervisor:restart_child(riak_core_vnode_proxy_sup,
                                           {riak_kv_vnode, Id}),
     %% Find the proxy pid and check it's alive and matches the supervisor
@@ -225,6 +226,7 @@ prepare(ThresholdSeed) ->
     %% and return the Pid so we know we have the same Pid.
     {ok, VPid} = riak_core_vnode_proxy:command_return_vnode(
                    {riak_kv_vnode,Index,node()}, timeout),
+    ?assertEqual(VPid, VPid1),
     true = is_process_alive(PPid),
     true = is_process_alive(VPid),
@@ -264,14 +266,14 @@ resume_args(#tstate{rt = RT}) ->
 resume(#rt{ppid = PPid, vpid = VPid}) ->
     sys:resume(VPid),
     %% Use the sys:get_status call to force a synchronous call
-    %% against the vnode proxy to ensure all messages sent by
+    %% against the vnode & the proxy to ensure all messages sent by
     %% this process have been serviced and there are no pending
     %% 'ping's in the vnode before we continue.
     %% Then drain the vnode to make sure any pending pongs have
-    %% been sent.
-    ok = drain(VPid),
+    %% been sent, and ensure the proxy has processed them as well.
+    _ = sys:get_status(PPid),
     _ = sys:get_status(VPid),
-    _ = sys:get_status(PPid).
+    ok = drain([VPid, PPid]).

 resume_next(S, _V, _A) ->
     S#tstate{vnode_running = true, proxy_msgs = 0, direct_msgs = 0}.
@@ -324,28 +326,28 @@ overloaded_args(#tstate{vnode_running = Running, rt = RT}) ->
 overloaded(Running, #rt{ppid = PPid, vpid = VPid}) ->
     case Running of
         true ->
-            ok = drain(PPid), % make sure all proxy msgs processed/dropped
-            ok = drain(VPid); % make sure any pending ping/pongs are processed
+            ok = drain([PPid, VPid]);
         _ ->
             ok
     end,
-    {riak_core_vnode_proxy:overloaded(PPid),
-     msgq_len(VPid), % just for debug so we can review in log output
-     sys:get_status(PPid)}. % ditto
+    {messages, PMsgs} = process_info(PPid, messages),
+    {messages, VMsgs} = process_info(VPid, messages),
+    Overloaded = riak_core_vnode_proxy:overloaded(PPid),
+    {Overloaded, {VMsgs, PMsgs}, sys:get_status(PPid)}.

 overloaded_post(#tstate{threshold = undefined}, _A,
-                {R, _VnodeQ, _ProxyStatus}) ->
+                {R, _Messages, _ProxyStatus}) ->
     %% If there are no thresholds there should never be an overload
     eq(R, false);
 overloaded_post(#tstate{vnode_running = true}, _A,
-                {R, _VnodeQ = 0, _ProxyStatus}) ->
+                {R, _Messages, _ProxyStatus}) ->
     %% If the vnode is running, we have cleared queues so
     %% should not be in overload.
     eq(R, false);
 overloaded_post(#tstate{vnode_running = false,
                         proxy_msgs = ProxyMsgs,
                         threshold = Threshold}, _A,
-                {ResultOverload, _VnodeQ, _ProxyStatus}) ->
+                {ResultOverload, _Messages, _ProxyStatus}) ->
     %% Either
     %%   mailbox is completely an estimate based on proxy msgs
     %% or mailbox is a check + estimate since
@@ -392,16 +394,33 @@ prep_env(Var, Val) ->
 %% Wait until all messages are drained by the Pid. No guarantees
 %% about future messages being sent, or that responses for the
 %% last message consumed have been transmitted.
-%%
-drain(Pid) ->
-    case erlang:process_info(Pid, message_queue_len) of
-        {message_queue_len, 0} ->
+%% NOTE: The "drain 3 times in a row" was determined empirically,
+%% and may not be sufficient (2 was not). Given time constraints,
+%% living with it for now. If this fails, we should really add some
+%% tracing code around the send of messages to Vnode and Proxy to
+%% determine where extra messages are coming from rather than just
+%% make this "try 4 times"
+%%
+drain(Pid) when is_pid(Pid) ->
+    drain([Pid], {-1, -1});
+
+drain(Pids) when is_list(Pids) ->
+    drain(Pids, {-1, -1}).
+drain(Pids, {PrevPrev, Prev}) ->
+    _ = [sys:suspend(Pid) || Pid <- Pids],
+    Len = lists:foldl(fun(Pid, Acc0) ->
+                              {message_queue_len, Len} = erlang:process_info(Pid, message_queue_len),
+                              Acc0 + Len
+                      end, 0, Pids),
+    _ = [sys:resume(Pid) || Pid <- Pids],
+    case {PrevPrev, Prev, Len} of
+        {0, 0, 0} ->
             ok;
-        {message_queue_len, L} when L > 0 ->
-            timer:sleep(1), % give it a millisecond to drain
-            drain(Pid);
-        ER ->
-            ER
+        _ ->
+            %% Attempt to ensure something else is scheduled before we try to drain again
+            erlang:yield(),
+            timer:sleep(1),
+            drain(Pids, {Prev, Len})
     end.

 %% Return the length of the message queue (or crash if proc dead)
@@ -457,3 +476,14 @@ confirm() ->
     pass.

 -endif.
+
+
+wait_for_vnode_change(VPid0, Index) ->
+    {ok, VPid1} = riak_core_vnode_manager:get_vnode_pid(Index, riak_kv_vnode),
+    case VPid1 of
+        VPid0 ->
+            timer:sleep(1),
+            wait_for_vnode_change(VPid0, Index);
+        _ ->
+            VPid1
+    end.

From e36c94e3df5ad9c081360e7de9603eb8ec96511e Mon Sep 17 00:00:00 2001
From: Nick Marino
Date: Tue, 1 Dec 2015 12:05:52 -0500
Subject: [PATCH 05/11] Log PIDs by default for console output

We've run into a number of issues over time where errant processes hang
around and stomp on other tests, and without PIDs in the logging it can
be difficult or impossible to conclusively identify and debug these
kinds of problems.
---
 src/riak_test_escript.erl | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/riak_test_escript.erl b/src/riak_test_escript.erl
index b883364cc..e5c7449ef 100644
--- a/src/riak_test_escript.erl
+++ b/src/riak_test_escript.erl
@@ -110,8 +110,9 @@ main(Args) ->
                 notice
         end,

+    Formatter = {lager_default_formatter, [time," [",severity,"] ", pid, " ", message, "\n"]},
     application:set_env(lager, error_logger_hwm, 250), %% helpful for debugging
-    application:set_env(lager, handlers, [{lager_console_backend, ConsoleLagerLevel},
+    application:set_env(lager, handlers, [{lager_console_backend, [ConsoleLagerLevel, Formatter]},
                                           {lager_file_backend, [{file, "log/test.log"},
                                                                 {level, ConsoleLagerLevel}]}]),
     lager:start(),

From 3c8dce8962fb6d61489e19afb7d2905bbb7133a4 Mon Sep 17 00:00:00 2001
From: Mark Allen
Date: Tue, 1 Dec 2015 13:00:09 -0600
Subject: [PATCH 06/11] Return 0 instead of doing another rt:sysread

Previously, the wait_for_reads function would execute another
rt:systest_read call and return a count of errors, when we already
asserted above that the list is empty.

If the wait_until function does not return true, or times out, or hits
the retry limit, the running test ought to fail anyway without needing
an additional read to fail. So doing another call is redundant and
unnecessary; we just return 0 as the value if rt:wait_until completes
successfully.
---
 tests/repl_util.erl | 11 +++++++----
 1 file changed, 7 insertions(+), 4 deletions(-)

diff --git a/tests/repl_util.erl b/tests/repl_util.erl
index a7be23f35..d8984374c 100644
--- a/tests/repl_util.erl
+++ b/tests/repl_util.erl
@@ -195,14 +195,17 @@ wait_until_fullsync_stopped(SourceLeader) ->
         end).

 wait_for_reads(Node, Start, End, Bucket, R) ->
-    rt:wait_until(Node,
+    ok = rt:wait_until(Node,
                   fun(_) ->
                           Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true),
                           Reads == []
                   end),
-    Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true),
-    lager:info("Reads: ~p", [Reads]),
-    length(Reads).
+    %% rt:systest_read/6 returns a list of errors encountered while performing
+    %% the requested reads. Since we are asserting this list is empty above,
+    %% we already know that if we reached here, the reads encountered no
+    %% errors. Therefore, we simply return 0 and do not execute another
+    %% systest_read call.
+    0.

 get_fs_coord_status_item(Node, SinkName, ItemName) ->
     Status = rpc:call(Node, riak_repl_console, status, [quiet]),

From 348dcf36b34ccf2f650822d0d793c3bc14389a65 Mon Sep 17 00:00:00 2001
From: Mark Allen
Date: Tue, 1 Dec 2015 21:39:52 -0600
Subject: [PATCH 07/11] Fix function spec for wait_until/2
---
 src/rt.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rt.erl b/src/rt.erl
index 829eefac5..05629b29e 100644
--- a/src/rt.erl
+++ b/src/rt.erl
@@ -664,7 +664,7 @@ wait_until(Fun) when is_function(Fun) ->

 %% @doc Convenience wrapper for wait_until for the myriad functions that
 %% take a node as single argument.
--spec wait_until([node()], fun((node()) -> boolean())) -> ok.
+-spec wait_until(node(), fun((node()) -> boolean())) -> ok | {fail, Result :: term()}.
 wait_until(Node, Fun) when is_atom(Node), is_function(Fun) ->
     wait_until(fun() -> Fun(Node) end);

From 3c88243cf94fefe21273d5ab8a6d2d6deb9ff876 Mon Sep 17 00:00:00 2001
From: Mark Allen
Date: Tue, 1 Dec 2015 21:43:20 -0600
Subject: [PATCH 08/11] And also the function signature
---
 src/rt.erl | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/rt.erl b/src/rt.erl
index 05629b29e..8dfc385da 100644
--- a/src/rt.erl
+++ b/src/rt.erl
@@ -664,7 +664,7 @@ wait_until(Fun) when is_function(Fun) ->

 %% @doc Convenience wrapper for wait_until for the myriad functions that
 %% take a node as single argument.
--spec wait_until(node(), fun((node()) -> boolean())) -> ok | {fail, Result :: term()}.
+-spec wait_until(node(), fun(() -> boolean())) -> ok | {fail, Result :: term()}.
 wait_until(Node, Fun) when is_atom(Node), is_function(Fun) ->
     wait_until(fun() -> Fun(Node) end);

From e9aa193a998307ed7283f68e2fbb38df775af73e Mon Sep 17 00:00:00 2001
From: Luke Bakken
Date: Tue, 26 Jan 2016 04:59:05 -0800
Subject: [PATCH 09/11] Ensure git adds everything and ignores any global
 ignore settings
---
 bin/rtdev-install.sh        | 2 +-
 bin/rtdev-setup-releases.sh | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/bin/rtdev-install.sh b/bin/rtdev-install.sh
index 00ed12001..920ac9ecc 100755
--- a/bin/rtdev-install.sh
+++ b/bin/rtdev-install.sh
@@ -37,5 +37,5 @@ echo " - Writing $RT_DEST_DIR/$RELEASE/VERSION"
 echo -n $VERSION > $RT_DEST_DIR/$RELEASE/VERSION
 cd $RT_DEST_DIR
 echo " - Reinitializing git state"
-git add .
+git add --all --force .
 git commit -a -m "riak_test init" --amend > /dev/null
diff --git a/bin/rtdev-setup-releases.sh b/bin/rtdev-setup-releases.sh
index a692e5a21..cb2756ee7 100755
--- a/bin/rtdev-setup-releases.sh
+++ b/bin/rtdev-setup-releases.sh
@@ -40,6 +40,6 @@ git init

 git config user.name "Riak Test"
 git config user.email "dev@basho.com"
-git add .
+git add --all --force .
 git commit -a -m "riak_test init" > /dev/null
 echo " - Successfully completed initial git commit of $RT_DEST_DIR"

From 5fb3e40d4b0041dd8d1df0334d36ea991910fba5 Mon Sep 17 00:00:00 2001
From: Luke Bakken
Date: Tue, 26 Jan 2016 06:17:44 -0800
Subject: [PATCH 10/11] Add test to query for single object using 2i exact
 match
---
 tests/verify_2i_returnterms.erl | 9 ++++++++-
 1 file changed, 8 insertions(+), 1 deletion(-)

diff --git a/tests/verify_2i_returnterms.erl b/tests/verify_2i_returnterms.erl
index 7a9f50ee4..f2155ce1e 100644
--- a/tests/verify_2i_returnterms.erl
+++ b/tests/verify_2i_returnterms.erl
@@ -25,6 +25,8 @@
          stream_pb/3, http_query/3]).
 -define(BUCKET, <<"2ibucket">>).
 -define(FOO, <<"foo">>).
+-define(BAZ, <<"baz">>).
+-define(BAT, <<"bat">>).
 -define(Q_OPTS, [{return_terms, true}]).

 confirm() ->
@@ -38,15 +40,20 @@ confirm() ->
     [put_an_object(PBPid, N) || N <- lists:seq(0, 100)],
     [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)],
+    put_an_object(PBPid, int_to_key(201), 201, ?BAZ),
+    put_an_object(PBPid, int_to_key(202), 202, ?BAT),

     %% Bucket, key, and index_eq queries should ignore `return_terms'
-    ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 200)]),
+    ExpectedKeys = lists:sort([int_to_key(N) || N <- lists:seq(0, 202)]),
     assertEqual(RiakHttp, PBPid, ExpectedKeys, {<<"$key">>, int_to_key(0), int_to_key(999)}, ?Q_OPTS, keys),
     assertEqual(RiakHttp, PBPid, ExpectedKeys, { <<"$bucket">>, ?BUCKET}, ?Q_OPTS, keys),

     ExpectedFooKeys = lists:sort([int_to_key(N) || N <- lists:seq(101, 200)]),
     assertEqual(RiakHttp, PBPid, ExpectedFooKeys, {<<"field1_bin">>, ?FOO}, ?Q_OPTS, keys),

+    assertEqual(RiakHttp, PBPid, [int_to_key(201)], {<<"field1_bin">>, ?BAZ}, ?Q_OPTS, keys),
+    assertEqual(RiakHttp, PBPid, [int_to_key(201)], {<<"field2_int">>, 201}, ?Q_OPTS, keys),
+
     ExpectedRangeResults = lists:sort([{list_to_binary(integer_to_list(N)), int_to_key(N)} || N <- lists:seq(1, 100)]),
     assertEqual(RiakHttp, PBPid, ExpectedRangeResults, {<<"field2_int">>, "1", "100"}, ?Q_OPTS, results),

From 3a6a2bc68c012925f5022eb70d24010b4a3673f3 Mon Sep 17 00:00:00 2001
From: Doug Rohrer
Date: Tue, 26 Jan 2016 15:40:51 +0000
Subject: [PATCH 11/11] Add another object to 2i with bucket types test in
 order to attempt to reproduce issue in riak_kv#1329
---
 tests/bucket_types.erl | 22 +++++++++++++++++++---
 1 file changed, 19 insertions(+), 3 deletions(-)

diff --git a/tests/bucket_types.erl b/tests/bucket_types.erl
index 1715cee82..92f29a762 100644
--- a/tests/bucket_types.erl
+++ b/tests/bucket_types.erl
@@ -285,6 +285,7 @@ confirm() ->
         true ->
             Obj01 = riakc_obj:new(<<"test">>, <<"JRD">>, <<"John Robert Doe, 25">>),
             Obj02 = riakc_obj:new({Type, <<"test">>}, <<"JRD">>, <<"Jane Rachel Doe, 21">>),
+            Obj03 = riakc_obj:new({Type, <<"test">>}, <<"JRD2">>, <<"Jane2 Rachel2 Doe2, 22">>),

             Obj1 = riakc_obj:update_metadata(Obj01,
                                              riakc_obj:set_secondary_index(
@@ -302,8 +303,16 @@ confirm() ->
                                                         [<<"Jane">>, <<"Rachel">>
                                                         ,<<"Doe">>]}])),
+            Obj3 = riakc_obj:update_metadata(Obj03,
+                                             riakc_obj:set_secondary_index(
+                                               riakc_obj:get_update_metadata(Obj03),
+                                               [{{integer_index, "age"},
+                                                 [22]},{{binary_index, "name"},
+                                                        [<<"Jane2">>, <<"Rachel2">>
+                                                        ,<<"Doe2">>]}])),
-            riakc_pb_socket:put(PB, Obj1),
-            riakc_pb_socket:put(PB, Obj2),
+            ok = riakc_pb_socket:put(PB, Obj1),
+            ok = riakc_pb_socket:put(PB, Obj2),
+            ok = riakc_pb_socket:put(PB, Obj3),

             ?assertMatch({ok, {index_results_v1, [<<"JRD">>], _, _}},
                          riakc_pb_socket:get_index(PB,
                                                    <<"test">>,
                                                    {binary_index,
@@ -322,7 +331,14 @@ confirm() ->
                                                     "name"},
                                                    <<"Jane">>)),

-            %% wild stab at the undocumented cs_bucket_fold
+            ?assertMatch({ok, {index_results_v1, [<<"JRD2">>], _, _}},
+                         riakc_pb_socket:get_index(PB,
+                                                   {Type,
+                                                    <<"test">>},
+                                                   {binary_index,
+                                                    "name"},
+                                                   <<"Jane2">>)),
+
+            %% wild stab at the undocumented cs_bucket_fold
             {ok, ReqID} = riakc_pb_socket:cs_bucket_fold(PB, <<"test">>, []),
             accumulate(ReqID),