From 80165f56970d9630c6b8bb4656415750ac97f992 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Tue, 29 Jul 2014 18:50:48 -0400 Subject: [PATCH 01/17] Move configuration functions to rt_config. - functions include set_conf/2, set_advanced_conf/2, and update_app_config/2. --- src/rt_config.erl | 36 ++++++++++++++++++- tests/cluster_meta_rmr.erl | 2 +- tests/gh_riak_core_155.erl | 2 +- tests/gh_riak_core_176.erl | 4 +-- tests/http_security.erl | 2 +- tests/overload.erl | 4 +-- tests/replication/repl_bucket_types.erl | 2 +- tests/replication/repl_cancel_fullsync.erl | 2 +- tests/replication/repl_fs_bench.erl | 2 +- tests/replication/repl_fs_stat_caching.erl | 2 +- tests/replication/repl_location_failures.erl | 2 +- tests/replication/repl_rt_cascading_rtq.erl | 2 +- tests/replication/replication.erl | 2 +- .../replication/replication2_connections.erl | 6 ++-- tests/replication/replication2_pg.erl | 8 ++--- tests/replication/replication2_ssl.erl | 10 +++--- .../replication_object_reformat.erl | 6 ++-- tests/replication/replication_ssl.erl | 10 +++--- tests/replication/replication_stats.erl | 2 +- tests/replication/rt_cascading.erl | 2 +- tests/test_cluster.erl | 2 +- tests/verify_capabilities.erl | 6 ++-- tests/verify_dynamic_ring.erl | 4 +-- tests/verify_handoff.erl | 4 +-- tests/verify_riak_object_reformat.erl | 2 +- tests/verify_riak_stats.erl | 2 +- tests/verify_tick_change.erl | 2 +- tests/verify_vclock.erl | 2 +- 28 files changed, 83 insertions(+), 49 deletions(-) diff --git a/src/rt_config.erl b/src/rt_config.erl index 4a916de63..39ef3788e 100644 --- a/src/rt_config.erl +++ b/src/rt_config.erl @@ -28,9 +28,14 @@ get_os_env/1, get_os_env/2, load/2, - set/2 + set/2, + set_conf/2, + set_advanced_conf/2, + update_app_config/2 ]). +-define(HARNESS, (rt_config:get(rt_harness))). + %% @doc Get the value of an OS Environment variable. The arity 1 version of %% this function will fail the test if it is undefined. 
get_os_env(Var) -> @@ -122,6 +127,35 @@ config_or_os_env(Config, Default) -> V end. + +-spec set_conf(atom(), [{string(), string()}]) -> ok. +set_conf(all, NameValuePairs) -> + ?HARNESS:set_conf(all, NameValuePairs); +set_conf(Node, NameValuePairs) -> + rt:stop(Node), + ?assertEqual(ok, rt:wait_until_unpingable(Node)), + ?HARNESS:set_conf(Node, NameValuePairs), + rt:start(Node). + +-spec set_advanced_conf(atom(), [{string(), string()}]) -> ok. +set_advanced_conf(all, NameValuePairs) -> + ?HARNESS:set_advanced_conf(all, NameValuePairs); +set_advanced_conf(Node, NameValuePairs) -> + rt:stop(Node), + ?assertEqual(ok, rt:wait_until_unpingable(Node)), + ?HARNESS:set_advanced_conf(Node, NameValuePairs), + rt:start(Node). + +%% @doc Rewrite the given node's app.config file, overriding the varialbes +%% in the existing app.config with those in `Config'. +update_app_config(all, Config) -> + ?HARNESS:update_app_config(all, Config); +update_app_config(Node, Config) -> + rt:stop(Node), + ?assertEqual(ok, rt:wait_until_unpingable(Node)), + ?HARNESS:update_app_config(Node, Config), + rt:start(Node). + to_upper(S) -> lists:map(fun char_to_upper/1, S). char_to_upper(C) when C >= $a, C =< $z -> C bxor $\s; char_to_upper(C) -> C. diff --git a/tests/cluster_meta_rmr.erl b/tests/cluster_meta_rmr.erl index bc5fcab11..beabc0714 100644 --- a/tests/cluster_meta_rmr.erl +++ b/tests/cluster_meta_rmr.erl @@ -24,7 +24,7 @@ -define(CM_PREFIX, {test, cm}). 
confirm() -> - rt:set_conf(all, [{"ring_size", "128"}]), + rt_config:set_conf(all, [{"ring_size", "128"}]), Seed = erlang:now(), lager:info("SEED: ~p", [Seed]), random:seed(Seed), diff --git a/tests/gh_riak_core_155.erl b/tests/gh_riak_core_155.erl index 6a6e7da82..5bd4bb17d 100644 --- a/tests/gh_riak_core_155.erl +++ b/tests/gh_riak_core_155.erl @@ -34,7 +34,7 @@ confirm() -> lager:info("Adding delayed start to app.config"), NewConfig = [{riak_core, [{delayed_start, 1000}]}], - rt:update_app_config(Node, NewConfig), + rt_config:update_app_config(Node, NewConfig), %% Restart node, add intercept that delay proxy startup, and issue gets. %% Gets will come in before proxies started, and should trigger crash. diff --git a/tests/gh_riak_core_176.erl b/tests/gh_riak_core_176.erl index 00821a615..3f7b351e1 100644 --- a/tests/gh_riak_core_176.erl +++ b/tests/gh_riak_core_176.erl @@ -47,7 +47,7 @@ confirm() -> lager:info("Change ~p handoff_ip from ~p to ~p", [Node2, NodeIP, AlternateIP]), NewConfig = [{riak_core, [{handoff_ip, AlternateIP}]}], - rt:update_app_config(Node2, NewConfig), + rt_config:update_app_config(Node2, NewConfig), rt:wait_for_service(Node2, riak_kv), lager:info("Write data to the cluster"), @@ -62,7 +62,7 @@ confirm() -> %% Check 0.0.0.0 address works lager:info("Change ~p handoff_ip to \"0.0.0.0\"", [Node3]), - rt:update_app_config(Node3, + rt_config:update_app_config(Node3, [{riak_core, [{handoff_ip, "0.0.0.0"}]}]), lager:info("Join ~p to the cluster and wait for handoff to finish", diff --git a/tests/http_security.erl b/tests/http_security.erl index b2e148373..c48e12771 100644 --- a/tests/http_security.erl +++ b/tests/http_security.erl @@ -520,7 +520,7 @@ confirm() -> enable_ssl(Node) -> [{http, {IP, Port}}|_] = rt:connection_info(Node), - rt:update_app_config(Node, [{riak_api, [{https, [{IP, + rt_config:update_app_config(Node, [{riak_api, [{https, [{IP, Port+1000}]}]}]), rt:wait_until_pingable(Node), rt:wait_for_service(Node, riak_kv). 
diff --git a/tests/overload.erl b/tests/overload.erl index 7dcd5ef1d..320af76f3 100644 --- a/tests/overload.erl +++ b/tests/overload.erl @@ -70,7 +70,7 @@ test_vnode_protection(Nodes, Victim, RO) -> Config2 = [{riak_core, [{vnode_overload_threshold, ?THRESHOLD}, {vnode_check_interval, 1}]}], rt:pmap(fun(Node) -> - rt:update_app_config(Node, Config2) + rt_config:update_app_config(Node, Config2) end, Nodes), {NumProcs2, QueueLen2} = run_test(Nodes, Victim, RO), ?assert(NumProcs2 =< (2*?THRESHOLD * 1.5)), @@ -107,7 +107,7 @@ test_fsm_protection(Nodes, Victim, RO) -> lager:info("Setting FSM limit to ~b", [?THRESHOLD]), Config3 = [{riak_kv, [{fsm_limit, ?THRESHOLD}]}], rt:pmap(fun(Node) -> - rt:update_app_config(Node, Config3) + rt_config:update_app_config(Node, Config3) end, Nodes), {NumProcs4, QueueLen4} = run_test(Nodes, Victim, RO), ?assert(NumProcs4 =< (?THRESHOLD * 1.1)), diff --git a/tests/replication/repl_bucket_types.erl b/tests/replication/repl_bucket_types.erl index 6e907d863..b643bfd5b 100644 --- a/tests/replication/repl_bucket_types.erl +++ b/tests/replication/repl_bucket_types.erl @@ -16,7 +16,7 @@ %% setup(Type) -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes = make_clusters(Type), diff --git a/tests/replication/repl_cancel_fullsync.erl b/tests/replication/repl_cancel_fullsync.erl index a2fb59560..435d6927e 100644 --- a/tests/replication/repl_cancel_fullsync.erl +++ b/tests/replication/repl_cancel_fullsync.erl @@ -33,7 +33,7 @@ %% @doc Ensure we can cancel a fullsync and restart it. 
confirm() -> - rt:set_advanced_conf(all, ?CONF(5)), + rt_config:set_advanced_conf(all, ?CONF(5)), Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), diff --git a/tests/replication/repl_fs_bench.erl b/tests/replication/repl_fs_bench.erl index 23a5a9eca..7736fcb6c 100644 --- a/tests/replication/repl_fs_bench.erl +++ b/tests/replication/repl_fs_bench.erl @@ -60,7 +60,7 @@ confirm() -> %% @doc Perform a fullsync, with given latency injected via intercept %% and return times for each fullsync time. fullsync_test(Strategy, Latency) -> - rt:set_advanced_conf(all, ?CONF(Strategy)), + rt_config:set_advanced_conf(all, ?CONF(Strategy)), [ANodes, BNodes] = rt:build_clusters([3, 3]), diff --git a/tests/replication/repl_fs_stat_caching.erl b/tests/replication/repl_fs_stat_caching.erl index 93d06c282..7a642c0fb 100644 --- a/tests/replication/repl_fs_stat_caching.erl +++ b/tests/replication/repl_fs_stat_caching.erl @@ -34,7 +34,7 @@ confirm() -> pass. setup() -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NodeCount = rt_config:get(num_nodes, 6), lager:info("Deploy ~p nodes", [NodeCount]), diff --git a/tests/replication/repl_location_failures.erl b/tests/replication/repl_location_failures.erl index bf7aeb1fe..6fe89d075 100644 --- a/tests/replication/repl_location_failures.erl +++ b/tests/replication/repl_location_failures.erl @@ -34,7 +34,7 @@ ]). confirm() -> - rt:set_advanced_conf(all, ?CONF(5)), + rt_config:set_advanced_conf(all, ?CONF(5)), [ANodes, BNodes] = rt:build_clusters([3, 3]), diff --git a/tests/replication/repl_rt_cascading_rtq.erl b/tests/replication/repl_rt_cascading_rtq.erl index f8edad8d6..af9960889 100644 --- a/tests/replication/repl_rt_cascading_rtq.erl +++ b/tests/replication/repl_rt_cascading_rtq.erl @@ -6,7 +6,7 @@ -define(TEST_BUCKET, <<"rt-cascading-rtq-systest-a">>). 
setup() -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), {SourceLeader, SinkLeaderA, SinkLeaderB, _, _, _} = ClusterNodes = make_clusters(), diff --git a/tests/replication/replication.erl b/tests/replication/replication.erl index f659ba94f..5fa70cbed 100644 --- a/tests/replication/replication.erl +++ b/tests/replication/replication.erl @@ -21,7 +21,7 @@ confirm() -> {diff_batch_size, 10} ]} ], - rt:set_advanced_conf(all, Conf), + rt_config:set_advanced_conf(all, Conf), [ANodes, BNodes] = rt:build_clusters([3, 3]), replication(ANodes, BNodes, false), pass. diff --git a/tests/replication/replication2_connections.erl b/tests/replication/replication2_connections.erl index 983d008a7..da81a29fc 100644 --- a/tests/replication/replication2_connections.erl +++ b/tests/replication/replication2_connections.erl @@ -44,7 +44,7 @@ simple_test() -> {rt_heartbeat_timeout, ?HB_TIMEOUT} ]}], - rt:set_advanced_conf(all, Conf), + rt_config:set_advanced_conf(all, Conf), [ANodes, BNodes] = rt:build_clusters([3, 3]), @@ -106,7 +106,7 @@ disconnect_test() -> {rt_heartbeat_timeout, ?HB_TIMEOUT} ]}], - rt:set_advanced_conf(all, Conf), + rt_config:set_advanced_conf(all, Conf), [ANodes, BNodes] = rt:build_clusters([3, 3]), @@ -173,7 +173,7 @@ error_cleanup_test() -> {cm_cancellation_interval, 5 * 1000} ]}], - rt:set_advanced_conf(all, Conf), + rt_config:set_advanced_conf(all, Conf), [ANodes, BNodes] = rt:build_clusters([3, 3]), diff --git a/tests/replication/replication2_pg.erl b/tests/replication/replication2_pg.erl index e62f42587..4c197d378 100644 --- a/tests/replication/replication2_pg.erl +++ b/tests/replication/replication2_pg.erl @@ -68,7 +68,7 @@ setup_repl_clusters(Conf, SSL) -> ], - rt:set_advanced_conf(all, Conf), + rt_config:set_advanced_conf(all, Conf), Nodes = [ANodes, BNodes, CNodes] = rt:build_clusters([2, 2, 2]), AFirst = hd(ANodes), @@ -84,11 +84,11 @@ setup_repl_clusters(Conf, SSL) -> 
case SSL of true -> lager:info("Enabling SSL for this test"), - [rt:update_app_config(N, merge_config(SSLConfig1, Conf)) || + [rt_config:update_app_config(N, merge_config(SSLConfig1, Conf)) || N <- ANodes], - [rt:update_app_config(N, merge_config(SSLConfig2, Conf)) || + [rt_config:update_app_config(N, merge_config(SSLConfig2, Conf)) || N <- BNodes], - [rt:update_app_config(N, merge_config(SSLConfig3, Conf)) || + [rt_config:update_app_config(N, merge_config(SSLConfig3, Conf)) || N <- CNodes]; _ -> lager:info("SSL not enabled for this test") diff --git a/tests/replication/replication2_ssl.erl b/tests/replication/replication2_ssl.erl index c857254e6..e8a1bbf0e 100644 --- a/tests/replication/replication2_ssl.erl +++ b/tests/replication/replication2_ssl.erl @@ -7,7 +7,7 @@ confirm() -> %% test requires allow_mult=false - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NumNodes = rt_config:get(num_nodes, 6), ClusterASize = rt_config:get(cluster_a_size, 3), @@ -271,10 +271,10 @@ confirm() -> {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("Reconfiguring nodes with SSL options"), - [rt:update_app_config(N, merge_config(SSLConfig5, BaseConf)) || N <- + [rt_config:update_app_config(N, merge_config(SSLConfig5, BaseConf)) || N <- ANodes], - [rt:update_app_config(N, merge_config(SSLConfig6, BaseConf)) || N <- + [rt_config:update_app_config(N, merge_config(SSLConfig6, BaseConf)) || N <- BNodes], [rt:wait_until_pingable(N) || N <- Nodes], @@ -297,9 +297,9 @@ merge_config(Mixin, Base) -> test_connection({Node1, Config1}, {Node2, Config2}) -> repl_util:disconnect_cluster(Node1, "B"), repl_util:wait_for_disconnect(Node1, "B"), - rt:update_app_config(Node2, Config2), + rt_config:update_app_config(Node2, Config2), rt:wait_until_pingable(Node2), - rt:update_app_config(Node1, Config1), + rt_config:update_app_config(Node1, Config1), rt:wait_until_pingable(Node1), rt:wait_for_service(Node1, 
[riak_kv, riak_repl]), rt:wait_for_service(Node2, [riak_kv, riak_repl]), diff --git a/tests/replication/replication_object_reformat.erl b/tests/replication/replication_object_reformat.erl index cea645e71..6ee5e9d68 100644 --- a/tests/replication/replication_object_reformat.erl +++ b/tests/replication/replication_object_reformat.erl @@ -164,7 +164,7 @@ verify_replication(AVersion, BVersion, Start, End, Realtime) -> %% @doc Configure two clusters and set up replication between them, %% return the node list of each cluster. configure_clusters(AVersion, BVersion, Realtime) -> - rt:set_advanced_conf(all, ?CONF(infinity)), + rt_config:set_advanced_conf(all, ?CONF(infinity)), Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), @@ -173,13 +173,13 @@ configure_clusters(AVersion, BVersion, Realtime) -> lager:info("Updating app config to force ~p on source cluster.", [AVersion]), - [rt:update_app_config(N, [{riak_kv, + [rt_config:update_app_config(N, [{riak_kv, [{object_format, AVersion}]}]) || N <- ANodes], lager:info("Updating app config to force ~p on sink cluster.", [BVersion]), - [rt:update_app_config(N, [{riak_kv, + [rt_config:update_app_config(N, [{riak_kv, [{object_format, BVersion}]}]) || N <- BNodes], diff --git a/tests/replication/replication_ssl.erl b/tests/replication/replication_ssl.erl index fdfb88f23..ff18c7203 100644 --- a/tests/replication/replication_ssl.erl +++ b/tests/replication/replication_ssl.erl @@ -6,7 +6,7 @@ confirm() -> %% test requires allow_mult=false - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NumNodes = rt_config:get(num_nodes, 6), ClusterASize = rt_config:get(cluster_a_size, 3), @@ -232,10 +232,10 @@ confirm() -> {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("Reconfiguring nodes with SSL options"), - [rt:update_app_config(N, merge_config(SSLConfig5, BaseConf)) || N <- + [rt_config:update_app_config(N, merge_config(SSLConfig5, 
BaseConf)) || N <- ANodes], - [rt:update_app_config(N, merge_config(SSLConfig6, BaseConf)) || N <- + [rt_config:update_app_config(N, merge_config(SSLConfig6, BaseConf)) || N <- BNodes], [rt:wait_until_pingable(N) || N <- Nodes], @@ -254,9 +254,9 @@ merge_config(Mixin, Base) -> lists:ukeymerge(1, lists:keysort(1, Mixin), lists:keysort(1, Base)). test_connection({Node1, Config1}, {Node2, Config2}) -> - rt:update_app_config(Node1, Config1), + rt_config:update_app_config(Node1, Config1), rt:wait_until_pingable(Node1), - rt:update_app_config(Node2, Config2), + rt_config:update_app_config(Node2, Config2), rt:wait_until_pingable(Node2), rt:wait_for_service(Node1, [riak_kv, riak_repl]), rt:wait_for_service(Node2, [riak_kv, riak_repl]), diff --git a/tests/replication/replication_stats.erl b/tests/replication/replication_stats.erl index 2fdcde99a..98734e1af 100644 --- a/tests/replication/replication_stats.erl +++ b/tests/replication/replication_stats.erl @@ -39,7 +39,7 @@ confirm() -> fullsync_enabled_and_started(). fullsync_enabled_and_started() -> - rt:set_advanced_conf(all, ?CONF), + rt_config:set_advanced_conf(all, ?CONF), [ANodes, BNodes] = rt:build_clusters([3, 3]), diff --git a/tests/replication/rt_cascading.erl b/tests/replication/rt_cascading.erl index 8c6702ed8..a47e29b51 100644 --- a/tests/replication/rt_cascading.erl +++ b/tests/replication/rt_cascading.erl @@ -38,7 +38,7 @@ confirm() -> %% test requires allow_mult=false b/c of rt:systest_read - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), case eunit:test(?MODULE, [verbose]) of ok -> diff --git a/tests/test_cluster.erl b/tests/test_cluster.erl index abe34d661..b19d15bb3 100644 --- a/tests/test_cluster.erl +++ b/tests/test_cluster.erl @@ -26,4 +26,4 @@ confirm() -> Config = [{riak_search, [{enabled, true}]}], rt:build_cluster(4, Config), ?assert(false), - fail. \ No newline at end of file + fail. 
diff --git a/tests/verify_capabilities.erl b/tests/verify_capabilities.erl index a1ff966df..2730eca85 100644 --- a/tests/verify_capabilities.erl +++ b/tests/verify_capabilities.erl @@ -213,19 +213,19 @@ confirm() -> end, lager:info("Override: (use: legacy), (prefer: proxy)"), - [rt:update_app_config(Node, Override(legacy, proxy)) || Node <- Nodes], + [rt_config:update_app_config(Node, Override(legacy, proxy)) || Node <- Nodes], lager:info("Verify vnode_routing == legacy"), assert_capability(CNode, {riak_core, vnode_routing}, legacy), lager:info("Override: (use: proxy), (prefer: legacy)"), - [rt:update_app_config(Node, Override(proxy, legacy)) || Node <- Nodes], + [rt_config:update_app_config(Node, Override(proxy, legacy)) || Node <- Nodes], lager:info("Verify vnode_routing == proxy"), assert_capability(CNode, {riak_core, vnode_routing}, proxy), lager:info("Override: (prefer: legacy)"), - [rt:update_app_config(Node, Override(undefined, legacy)) || Node <- Nodes], + [rt_config:update_app_config(Node, Override(undefined, legacy)) || Node <- Nodes], lager:info("Verify vnode_routing == legacy"), assert_capability(CNode, {riak_core, vnode_routing}, legacy), diff --git a/tests/verify_dynamic_ring.erl b/tests/verify_dynamic_ring.erl index afd87f267..d1566a1d3 100644 --- a/tests/verify_dynamic_ring.erl +++ b/tests/verify_dynamic_ring.erl @@ -31,8 +31,8 @@ confirm() -> %% test requires allow_mult=false b/c of rt:systest_read - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), - rt:update_app_config(all, [{riak_core, + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:update_app_config(all, [{riak_core, [{ring_creation_size, ?START_SIZE}]}]), [ANode, AnotherNode, YetAnother, _ReplacingNode] = _AllNodes = rt:deploy_nodes(4), NewNodes = Nodes = [ANode, AnotherNode, YetAnother], diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index f8dd56639..cd7c5290e 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl 
@@ -71,11 +71,11 @@ run_test(TestMode, NTestItems, NTestNodes, HandoffEncoding) -> } ], - rt:update_app_config(RootNode, OverrideData), + rt_config:update_app_config(RootNode, OverrideData), %% Update all nodes (capabilities are not re-negotiated): lists:foreach(fun(TestNode) -> - rt:update_app_config(TestNode, OverrideData), + rt_config:update_app_config(TestNode, OverrideData), assert_using(RootNode, { riak_kv, handoff_data_encoding }, HandoffEncoding) end, Nodes) diff --git a/tests/verify_riak_object_reformat.erl b/tests/verify_riak_object_reformat.erl index ca44f9668..cd693f4ff 100644 --- a/tests/verify_riak_object_reformat.erl +++ b/tests/verify_riak_object_reformat.erl @@ -31,7 +31,7 @@ -define(N, 3). confirm() -> - rt:update_app_config(all, [{riak_kv, [{object_format, v1}]}]), + rt_config:update_app_config(all, [{riak_kv, [{object_format, v1}]}]), TestMetaData = riak_test_runner:metadata(), DowngradeVsn = proplists:get_value(upgrade_version, TestMetaData, previous), Nodes = [Node1|_] = rt:build_cluster(?N), diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index d6b944595..10568b91a 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -126,4 +126,4 @@ get_stats(Node) -> StatString = os:cmd(io_lib:format("curl -s -S ~s/stats", [rt:http_url(Node)])), {struct, Stats} = mochijson2:decode(StatString), %%lager:debug(StatString), - Stats. \ No newline at end of file + Stats. 
diff --git a/tests/verify_tick_change.erl b/tests/verify_tick_change.erl index 3390fbac8..208adcdd4 100644 --- a/tests/verify_tick_change.erl +++ b/tests/verify_tick_change.erl @@ -25,7 +25,7 @@ confirm() -> ClusterSize = 4, - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NewConfig = [], Nodes = rt:build_cluster(ClusterSize, NewConfig), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), diff --git a/tests/verify_vclock.erl b/tests/verify_vclock.erl index 64e03ea1e..dcd24c690 100644 --- a/tests/verify_vclock.erl +++ b/tests/verify_vclock.erl @@ -113,7 +113,7 @@ force_encoding(Node, EncodingMethod) -> } ], - rt:update_app_config(Node, OverrideData) + rt_config:update_app_config(Node, OverrideData) end. From 0a5c46b88d07b5ac97e94116e21de1abfd17e9da Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 08:32:37 -0400 Subject: [PATCH 02/17] Continue to move config functions from rt to rt_config. --- src/rt.erl | 40 +++------------------------------------- src/rt_config.erl | 6 +++++- 2 files changed, 8 insertions(+), 38 deletions(-) diff --git a/src/rt.erl b/src/rt.erl index 586cc7e2a..94c4b531d 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -102,8 +102,6 @@ rpc_get_env/2, set_backend/1, set_backend/2, - set_conf/2, - set_advanced_conf/2, setup_harness/2, setup_log_capture/1, slow_upgrade/3, @@ -126,7 +124,6 @@ systest_write/5, systest_write/6, teardown/0, - update_app_config/2, upgrade/2, upgrade/3, versions/0, @@ -198,34 +195,6 @@ str(String, Substr) -> _ -> true end. --spec set_conf(atom(), [{string(), string()}]) -> ok. -set_conf(all, NameValuePairs) -> - ?HARNESS:set_conf(all, NameValuePairs); -set_conf(Node, NameValuePairs) -> - stop(Node), - ?assertEqual(ok, rt:wait_until_unpingable(Node)), - ?HARNESS:set_conf(Node, NameValuePairs), - start(Node). - --spec set_advanced_conf(atom(), [{string(), string()}]) -> ok. 
-set_advanced_conf(all, NameValuePairs) -> - ?HARNESS:set_advanced_conf(all, NameValuePairs); -set_advanced_conf(Node, NameValuePairs) -> - stop(Node), - ?assertEqual(ok, rt:wait_until_unpingable(Node)), - ?HARNESS:set_advanced_conf(Node, NameValuePairs), - start(Node). - -%% @doc Rewrite the given node's app.config file, overriding the varialbes -%% in the existing app.config with those in `Config'. -update_app_config(all, Config) -> - ?HARNESS:update_app_config(all, Config); -update_app_config(Node, Config) -> - stop(Node), - ?assertEqual(ok, rt:wait_until_unpingable(Node)), - ?HARNESS:update_app_config(Node, Config), - start(Node). - %% @doc Helper that returns first successful application get_env result, %% used when different versions of Riak use different app vars for %% the same setting. @@ -306,16 +275,13 @@ deploy_nodes(NumNodes, InitialConfig) when is_integer(NumNodes) -> NodeConfig = [{current, InitialConfig} || _ <- lists:seq(1,NumNodes)], deploy_nodes(NodeConfig); deploy_nodes(Versions, Services) -> - NodeConfig = [ version_to_config(Version) || Version <- Versions ], + NodeConfig = [ rt_config:version_to_config(Version) || Version <- Versions ], Nodes = ?HARNESS:deploy_nodes(NodeConfig), lager:info("Waiting for services ~p to start on ~p.", [Services, Nodes]), [ ok = wait_for_service(Node, Service) || Node <- Nodes, Service <- Services ], Nodes. -version_to_config(Config) when is_tuple(Config)-> Config; -version_to_config(Version) -> {Version, default}. 
- deploy_clusters(Settings) -> ClusterConfigs = [case Setting of Configs when is_list(Configs) -> @@ -1562,12 +1528,12 @@ set_backend(multi, Extras) -> set_backend(riak_kv_multi_backend, Extras); set_backend(Backend, _) when Backend == riak_kv_bitcask_backend; Backend == riak_kv_eleveldb_backend; Backend == riak_kv_memory_backend -> lager:info("rt:set_backend(~p)", [Backend]), - update_app_config(all, [{riak_kv, [{storage_backend, Backend}]}]), + rt_config:update_app_config(all, [{riak_kv, [{storage_backend, Backend}]}]), get_backends(); set_backend(Backend, Extras) when Backend == riak_kv_multi_backend -> MultiConfig = proplists:get_value(multi_config, Extras, default), Config = make_multi_backend_config(MultiConfig), - update_app_config(all, [{riak_kv, Config}]), + rt_config:update_app_config(all, [{riak_kv, Config}]), get_backends(); set_backend(Other, _) -> lager:warning("rt:set_backend doesn't recognize ~p as a legit backend, using the default.", [Other]), diff --git a/src/rt_config.erl b/src/rt_config.erl index 39ef3788e..8da5a9539 100644 --- a/src/rt_config.erl +++ b/src/rt_config.erl @@ -31,7 +31,8 @@ set/2, set_conf/2, set_advanced_conf/2, - update_app_config/2 + update_app_config/2, + version_to_config/1 ]). -define(HARNESS, (rt_config:get(rt_harness))). @@ -156,6 +157,9 @@ update_app_config(Node, Config) -> ?HARNESS:update_app_config(Node, Config), rt:start(Node). +version_to_config(Config) when is_tuple(Config)-> Config; +version_to_config(Version) -> {Version, default}. + to_upper(S) -> lists:map(fun char_to_upper/1, S). char_to_upper(C) when C >= $a, C =< $z -> C bxor $\s; char_to_upper(C) -> C. From 541dce5727aff42af9e3764cb2a08d033b7c292e Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 10:10:30 -0400 Subject: [PATCH 03/17] Migrate several more functions into rt_cluster from rt. 
--- src/riak_test_escript.erl | 2 +- src/rt.erl | 146 ---------------- src/rt_cluster.erl | 157 +++++++++++++++++- tests/cluster_meta_rmr.erl | 4 +- tests/cuttlefish_configuration.erl | 2 +- tests/ensemble_util.erl | 8 +- tests/gh_riak_core_154.erl | 2 +- tests/gh_riak_core_155.erl | 2 +- tests/gh_riak_core_176.erl | 2 +- tests/gh_riak_kv_765.erl | 6 +- tests/http_bucket_types.erl | 2 +- tests/http_security.erl | 2 +- tests/jmx_verify.erl | 6 +- tests/loaded_upgrade.erl | 2 +- tests/mapred_basic_compat.erl | 2 +- tests/mapred_buffer_prereduce.erl | 2 +- tests/mapred_dead_pipe.erl | 2 +- tests/mapred_http_errors.erl | 2 +- tests/mapred_javascript.erl | 2 +- tests/mapred_notfound_failover.erl | 2 +- tests/mapred_search_switch.erl | 2 +- tests/mapred_verify_rt.erl | 2 +- tests/overload.erl | 2 +- tests/partition_repair.erl | 4 +- tests/pb_cipher_suites.erl | 2 +- tests/pb_security.erl | 2 +- tests/pipe_verify_basics.erl | 2 +- tests/pipe_verify_examples.erl | 2 +- tests/pipe_verify_exceptions.erl | 2 +- tests/pipe_verify_handoff.erl | 2 +- tests/pipe_verify_handoff_blocking.erl | 2 +- .../pipe_verify_restart_input_forwarding.erl | 2 +- tests/pipe_verify_sink_types.erl | 2 +- tests/post_generate_key.erl | 2 +- tests/pr_pw.erl | 2 +- tests/replication/repl_aae_fullsync.erl | 8 +- tests/replication/repl_bucket_types.erl | 6 +- tests/replication/repl_cancel_fullsync.erl | 6 +- .../repl_consistent_object_filter.erl | 2 +- tests/replication/repl_fs_bench.erl | 6 +- tests/replication/repl_fs_stat_caching.erl | 2 +- tests/replication/repl_location_failures.erl | 6 +- tests/replication/repl_reduced.erl | 16 +- tests/replication/repl_rt_cascading_rtq.erl | 4 +- tests/replication/repl_rt_heartbeat.erl | 2 +- tests/replication/repl_rt_overload.erl | 2 +- tests/replication/repl_rt_pending.erl | 2 +- tests/replication/replication.erl | 2 +- .../replication/replication2_connections.erl | 18 +- .../replication2_console_tests.erl | 2 +- tests/replication/replication2_fsschedule.erl | 
6 +- tests/replication/replication2_pg.erl | 2 +- .../replication2_rt_sink_connection.erl | 2 +- tests/replication/replication2_ssl.erl | 4 +- tests/replication/replication2_upgrade.erl | 2 +- .../replication_object_reformat.erl | 4 +- tests/replication/replication_ssl.erl | 4 +- tests/replication/replication_stats.erl | 6 +- tests/replication/replication_upgrade.erl | 2 +- tests/replication/rt_cascading.erl | 42 ++--- tests/riak_admin_console_tests.erl | 2 +- tests/riak_control.erl | 2 +- tests/riak_control_authentication.erl | 2 +- tests/riak_rex.erl | 4 +- tests/riaknostic_rt.erl | 2 +- tests/rolling_capabilities.erl | 2 +- tests/rt_basic_test.erl | 2 +- tests/sibling_explosion.erl | 2 +- tests/test_cluster.erl | 2 +- tests/verify_2i_aae.erl | 2 +- tests/verify_2i_limit.erl | 2 +- tests/verify_2i_mixed_cluster.erl | 2 +- tests/verify_2i_returnterms.erl | 2 +- tests/verify_2i_stream.erl | 2 +- tests/verify_2i_timeout.erl | 2 +- tests/verify_aae.erl | 6 +- tests/verify_api_timeouts.erl | 2 +- tests/verify_asis_put.erl | 2 +- tests/verify_backup_restore.erl | 6 +- tests/verify_basic_upgrade.erl | 2 +- tests/verify_bitcask_tombstone2_upgrade.erl | 2 +- tests/verify_busy_dist_port.erl | 2 +- tests/verify_capabilities.erl | 2 +- tests/verify_commit_hooks.erl | 2 +- tests/verify_conditional_postcommit.erl | 2 +- tests/verify_counter_capability.erl | 2 +- tests/verify_counter_converge.erl | 2 +- tests/verify_counter_repl.erl | 2 +- tests/verify_crdt_capability.erl | 2 +- tests/verify_cs_bucket.erl | 2 +- tests/verify_down.erl | 2 +- tests/verify_dt_context.erl | 2 +- tests/verify_dt_converge.erl | 2 +- tests/verify_dt_upgrade.erl | 2 +- tests/verify_dvv_repl.erl | 2 +- tests/verify_dynamic_ring.erl | 2 +- tests/verify_handoff.erl | 4 +- tests/verify_handoff_mixed.erl | 2 +- tests/verify_kv_health_check.erl | 2 +- tests/verify_link_walk_urls.erl | 2 +- tests/verify_listkeys.erl | 2 +- tests/verify_listkeys_eqcfsm.erl | 4 +- tests/verify_membackend.erl | 12 +- 
tests/verify_mr_prereduce_node_down.erl | 2 +- tests/verify_no_writes_on_read.erl | 2 +- tests/verify_object_limits.erl | 2 +- tests/verify_reset_bucket_props.erl | 2 +- tests/verify_riak_lager.erl | 2 +- tests/verify_riak_object_reformat.erl | 2 +- tests/verify_riak_stats.erl | 2 +- tests/verify_search.erl | 2 +- tests/verify_secondary_index_reformat.erl | 2 +- tests/verify_snmp.erl | 2 +- tests/verify_staged_clustering.erl | 2 +- tests/verify_tick_change.erl | 2 +- tests/verify_vclock.erl | 4 +- tests/verify_vclock_encoding_upgrade.erl | 2 +- tests/yz_ensemble.erl | 4 +- 118 files changed, 346 insertions(+), 343 deletions(-) diff --git a/src/riak_test_escript.erl b/src/riak_test_escript.erl index ffc9de750..5bb67cdbc 100644 --- a/src/riak_test_escript.erl +++ b/src/riak_test_escript.erl @@ -193,7 +193,7 @@ maybe_teardown(true, TestResults, Coverage, Verbose) -> so_kill_riak_maybe(); _ -> lager:info("Multiple tests run or no failure"), - rt:teardown(), + rt_cluster:teardown(), print_summary(TestResults, Coverage, Verbose) end, ok. diff --git a/src/rt.erl b/src/rt.erl index 94c4b531d..378b6bbe4 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -35,27 +35,16 @@ attach/2, attach_direct/2, brutal_kill/1, - build_cluster/1, - build_cluster/2, - build_cluster/3, - build_clusters/1, - join_cluster/1, capability/2, capability/3, check_singleton_node/1, check_ibrowse/0, claimant_according_to/1, - clean_cluster/1, - clean_data_dir/1, - clean_data_dir/2, cmd/1, cmd/2, connection_info/1, console/2, create_and_activate_bucket_type/3, - deploy_nodes/1, - deploy_nodes/2, - deploy_clusters/1, down/2, enable_search_hook/2, expect_in_log/2, @@ -123,10 +112,8 @@ systest_write/3, systest_write/5, systest_write/6, - teardown/0, upgrade/2, upgrade/3, - versions/0, wait_for_cluster_service/2, wait_for_cmd/1, wait_for_service/2, @@ -260,48 +247,6 @@ get_https_conn_info(Node) -> undefined end. -%% @doc Deploy a set of freshly installed Riak nodes, returning a list of the -%% nodes deployed. 
-%% @todo Re-add -spec after adding multi-version support -deploy_nodes(Versions) when is_list(Versions) -> - deploy_nodes(Versions, [riak_kv]); -deploy_nodes(NumNodes) when is_integer(NumNodes) -> - deploy_nodes([ current || _ <- lists:seq(1, NumNodes)]). - -%% @doc Deploy a set of freshly installed Riak nodes with the given -%% `InitialConfig', returning a list of the nodes deployed. --spec deploy_nodes(NumNodes :: integer(), any()) -> [node()]. -deploy_nodes(NumNodes, InitialConfig) when is_integer(NumNodes) -> - NodeConfig = [{current, InitialConfig} || _ <- lists:seq(1,NumNodes)], - deploy_nodes(NodeConfig); -deploy_nodes(Versions, Services) -> - NodeConfig = [ rt_config:version_to_config(Version) || Version <- Versions ], - Nodes = ?HARNESS:deploy_nodes(NodeConfig), - lager:info("Waiting for services ~p to start on ~p.", [Services, Nodes]), - [ ok = wait_for_service(Node, Service) || Node <- Nodes, - Service <- Services ], - Nodes. - -deploy_clusters(Settings) -> - ClusterConfigs = [case Setting of - Configs when is_list(Configs) -> - Configs; - NumNodes when is_integer(NumNodes) -> - [{current, default} || _ <- lists:seq(1, NumNodes)]; - {NumNodes, InitialConfig} when is_integer(NumNodes) -> - [{current, InitialConfig} || _ <- lists:seq(1,NumNodes)]; - {NumNodes, Vsn, InitialConfig} when is_integer(NumNodes) -> - [{Vsn, InitialConfig} || _ <- lists:seq(1,NumNodes)] - end || Setting <- Settings], - ?HARNESS:deploy_clusters(ClusterConfigs). - -build_clusters(Settings) -> - Clusters = deploy_clusters(Settings), - [begin - join_cluster(Nodes), - lager:info("Cluster built: ~p", [Nodes]) - end || Nodes <- Clusters], - Clusters. %% @doc Start the specified Riak node start(Node) -> @@ -1002,98 +947,7 @@ claimant_according_to(Node) -> BadRpc end. 
-%%%=================================================================== -%%% Cluster Utility Functions -%%%=================================================================== - -%% @doc Safely construct a new cluster and return a list of the deployed nodes -%% @todo Add -spec and update doc to reflect mult-version changes -build_cluster(Versions) when is_list(Versions) -> - build_cluster(length(Versions), Versions, default); -build_cluster(NumNodes) -> - build_cluster(NumNodes, default). - -%% @doc Safely construct a `NumNode' size cluster using -%% `InitialConfig'. Return a list of the deployed nodes. -build_cluster(NumNodes, InitialConfig) -> - build_cluster(NumNodes, [], InitialConfig). - -build_cluster(NumNodes, Versions, InitialConfig) -> - %% Deploy a set of new nodes - Nodes = - case Versions of - [] -> - deploy_nodes(NumNodes, InitialConfig); - _ -> - deploy_nodes(Versions) - end, - - join_cluster(Nodes), - lager:info("Cluster built: ~p", [Nodes]), - Nodes. - -join_cluster(Nodes) -> - %% Ensure each node owns 100% of it's own ring - [?assertEqual([Node], owners_according_to(Node)) || Node <- Nodes], - - %% Join nodes - [Node1|OtherNodes] = Nodes, - case OtherNodes of - [] -> - %% no other nodes, nothing to join/plan/commit - ok; - _ -> - %% ok do a staged join and then commit it, this eliminates the - %% large amount of redundant handoff done in a sequential join - [staged_join(Node, Node1) || Node <- OtherNodes], - plan_and_commit(Node1), - try_nodes_ready(Nodes, 3, 500) - end, - - ?assertEqual(ok, wait_until_nodes_ready(Nodes)), - - %% Ensure each node owns a portion of the ring - wait_until_nodes_agree_about_ownership(Nodes), - ?assertEqual(ok, wait_until_no_pending_changes(Nodes)), - ok. 
- -try_nodes_ready([Node1 | _Nodes], 0, _SleepMs) -> - lager:info("Nodes not ready after initial plan/commit, retrying"), - plan_and_commit(Node1); -try_nodes_ready(Nodes, N, SleepMs) -> - ReadyNodes = [Node || Node <- Nodes, is_ready(Node) =:= true], - case ReadyNodes of - Nodes -> - ok; - _ -> - timer:sleep(SleepMs), - try_nodes_ready(Nodes, N-1, SleepMs) - end. -%% @doc Stop nodes and wipe out their data directories -clean_cluster(Nodes) when is_list(Nodes) -> - [stop_and_wait(Node) || Node <- Nodes], - clean_data_dir(Nodes). - -clean_data_dir(Nodes) -> - clean_data_dir(Nodes, ""). - -clean_data_dir(Nodes, SubDir) when not is_list(Nodes) -> - clean_data_dir([Nodes], SubDir); -clean_data_dir(Nodes, SubDir) when is_list(Nodes) -> - ?HARNESS:clean_data_dir(Nodes, SubDir). - -%% @doc Shutdown every node, this is for after a test run is complete. -teardown() -> - %% stop all connected nodes, 'cause it'll be faster that - %%lager:info("RPC stopping these nodes ~p", [nodes()]), - %%[ rt:stop(Node) || Node <- nodes()], - %% Then do the more exhaustive harness thing, in case something was up - %% but not connected. - ?HARNESS:teardown(). - -versions() -> - ?HARNESS:versions(). %%%=================================================================== %%% Basic Read/Write Functions %%%=================================================================== diff --git a/src/rt_cluster.erl b/src/rt_cluster.erl index 372bd0869..1507a781f 100644 --- a/src/rt_cluster.erl +++ b/src/rt_cluster.erl @@ -19,15 +19,32 @@ %% ------------------------------------------------------------------- -module(rt_cluster). +-include_lib("eunit/include/eunit.hrl"). -export([properties/0, setup/2, config/0, - augment_config/3]). + augment_config/3, + deploy_nodes/1, + deploy_nodes/2, + deploy_clusters/1, + build_cluster/1, + build_cluster/2, + build_cluster/3, + build_clusters/1, + clean_cluster/1, + join_cluster/1, + clean_data_dir/1, + clean_data_dir/2, + try_nodes_ready/3, + versions/0, + teardown/0]). 
-export([maybe_wait_for_transfers/2]). -include("rt.hrl"). +-define(HARNESS, (rt_config:get(rt_harness))). + %% @doc Default properties used if a riak_test module does not specify %% a custom properties function. -spec properties() -> rt_properties(). @@ -37,7 +54,7 @@ properties() -> -spec setup(rt_properties(), proplists:proplist()) -> {ok, rt_properties()} | {error, term()}. setup(Properties, MetaData) -> - rt:set_conf(all, [{"buckets.default.allow_mult", "false"}]), + rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), RollingUpgrade = proplists:get_value(rolling_upgrade, MetaData, @@ -53,9 +70,52 @@ setup(Properties, MetaData) -> {ok, UpdProperties}. deploy_or_build_cluster(Versions, true) -> - rt:build_cluster(Versions); + build_cluster(Versions); deploy_or_build_cluster(Versions, false) -> - rt:deploy_nodes(Versions). + deploy_nodes(Versions). + +%% @doc Deploy a set of freshly installed Riak nodes, returning a list of the +%% nodes deployed. +%% @todo Re-add -spec after adding multi-version support +deploy_nodes(Versions) when is_list(Versions) -> + deploy_nodes(Versions, [riak_kv]); +deploy_nodes(NumNodes) when is_integer(NumNodes) -> + deploy_nodes([ current || _ <- lists:seq(1, NumNodes)]). + +%% @doc Deploy a set of freshly installed Riak nodes with the given +%% `InitialConfig', returning a list of the nodes deployed. +-spec deploy_nodes(NumNodes :: integer(), any()) -> [node()]. +deploy_nodes(NumNodes, InitialConfig) when is_integer(NumNodes) -> + NodeConfig = [{current, InitialConfig} || _ <- lists:seq(1,NumNodes)], + deploy_nodes(NodeConfig); +deploy_nodes(Versions, Services) -> + NodeConfig = [ rt_config:version_to_config(Version) || Version <- Versions ], + Nodes = ?HARNESS:deploy_nodes(NodeConfig), + lager:info("Waiting for services ~p to start on ~p.", [Services, Nodes]), + [ ok = rt:wait_for_service(Node, Service) || Node <- Nodes, + Service <- Services ], + Nodes. 
+ +deploy_clusters(Settings) -> + ClusterConfigs = [case Setting of + Configs when is_list(Configs) -> + Configs; + NumNodes when is_integer(NumNodes) -> + [{current, default} || _ <- lists:seq(1, NumNodes)]; + {NumNodes, InitialConfig} when is_integer(NumNodes) -> + [{current, InitialConfig} || _ <- lists:seq(1,NumNodes)]; + {NumNodes, Vsn, InitialConfig} when is_integer(NumNodes) -> + [{Vsn, InitialConfig} || _ <- lists:seq(1,NumNodes)] + end || Setting <- Settings], + ?HARNESS:deploy_clusters(ClusterConfigs). + +build_clusters(Settings) -> + Clusters = deploy_clusters(Settings), + [begin + join_cluster(Nodes), + lager:info("Cluster built: ~p", [Nodes]) + end || Nodes <- Clusters], + Clusters. maybe_wait_for_transfers(Nodes, true) -> lager:info("Waiting for transfers"), @@ -63,6 +123,95 @@ maybe_wait_for_transfers(Nodes, true) -> maybe_wait_for_transfers(_Nodes, false) -> ok. +%% @doc Safely construct a new cluster and return a list of the deployed nodes +%% @todo Add -spec and update doc to reflect mult-version changes +build_cluster(Versions) when is_list(Versions) -> + build_cluster(length(Versions), Versions, default); +build_cluster(NumNodes) -> + build_cluster(NumNodes, default). + +%% @doc Safely construct a `NumNode' size cluster using +%% `InitialConfig'. Return a list of the deployed nodes. +build_cluster(NumNodes, InitialConfig) -> + build_cluster(NumNodes, [], InitialConfig). + +build_cluster(NumNodes, Versions, InitialConfig) -> + %% Deploy a set of new nodes + Nodes = + case Versions of + [] -> + deploy_nodes(NumNodes, InitialConfig); + _ -> + deploy_nodes(Versions) + end, + + join_cluster(Nodes), + lager:info("Cluster built: ~p", [Nodes]), + Nodes. 
+ +join_cluster(Nodes) -> + %% Ensure each node owns 100% of it's own ring + [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes], + + %% Join nodes + [Node1|OtherNodes] = Nodes, + case OtherNodes of + [] -> + %% no other nodes, nothing to join/plan/commit + ok; + _ -> + %% ok do a staged join and then commit it, this eliminates the + %% large amount of redundant handoff done in a sequential join + [rt:staged_join(Node, Node1) || Node <- OtherNodes], + rt:plan_and_commit(Node1), + try_nodes_ready(Nodes, 3, 500) + end, + + ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + + %% Ensure each node owns a portion of the ring + rt:wait_until_nodes_agree_about_ownership(Nodes), + ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)), + ok. + +try_nodes_ready([Node1 | _Nodes], 0, _SleepMs) -> + lager:info("Nodes not ready after initial plan/commit, retrying"), + rt:plan_and_commit(Node1); +try_nodes_ready(Nodes, N, SleepMs) -> + ReadyNodes = [Node || Node <- Nodes, rt:is_ready(Node) =:= true], + case ReadyNodes of + Nodes -> + ok; + _ -> + timer:sleep(SleepMs), + try_nodes_ready(Nodes, N-1, SleepMs) + end. + +%% @doc Stop nodes and wipe out their data directories +clean_cluster(Nodes) when is_list(Nodes) -> + [rt:stop_and_wait(Node) || Node <- Nodes], + clean_data_dir(Nodes). + +clean_data_dir(Nodes) -> + clean_data_dir(Nodes, ""). + +clean_data_dir(Nodes, SubDir) when not is_list(Nodes) -> + clean_data_dir([Nodes], SubDir); +clean_data_dir(Nodes, SubDir) when is_list(Nodes) -> + ?HARNESS:clean_data_dir(Nodes, SubDir). + +%% @doc Shutdown every node, this is for after a test run is complete. +teardown() -> + %% stop all connected nodes, 'cause it'll be faster that + %%lager:info("RPC stopping these nodes ~p", [nodes()]), + %%[ rt:stop(Node) || Node <- nodes()], + %% Then do the more exhaustive harness thing, in case something was up + %% but not connected. + ?HARNESS:teardown(). + +versions() -> + ?HARNESS:versions(). 
+ config() -> [{riak_core, [{handoff_concurrency, 11}]}, {riak_search, [{enabled, true}]}, diff --git a/tests/cluster_meta_rmr.erl b/tests/cluster_meta_rmr.erl index beabc0714..fe7f73e3a 100644 --- a/tests/cluster_meta_rmr.erl +++ b/tests/cluster_meta_rmr.erl @@ -59,10 +59,10 @@ run(NumNodes, NumRounds, StableRounds) -> exit(Pid, kill), %% start all the down nodes so we can clean them :( [rt:start(Node) || Node <- DownNodes], - rt:clean_cluster(AllNodes). + rt_cluster:clean_cluster(AllNodes). setup_nodes(NumNodes) -> - Nodes = rt:build_cluster(NumNodes), + Nodes = rt_cluster:build_cluster(NumNodes), [begin ok = rpc:call(Node, application, set_env, [riak_core, broadcast_exchange_timer, 4294967295]), ok = rpc:call(Node, application, set_env, [riak_core, gossip_limit, {10000000, 4294967295}]), diff --git a/tests/cuttlefish_configuration.erl b/tests/cuttlefish_configuration.erl index 4447bfca4..ea10009bf 100644 --- a/tests/cuttlefish_configuration.erl +++ b/tests/cuttlefish_configuration.erl @@ -12,7 +12,7 @@ confirm() -> {"leveldb.sync_on_write", "on"} ], - [Node] = rt:deploy_nodes(1, {cuttlefish, CuttlefishConf}), + [Node] = rt_cluster:deploy_nodes(1, {cuttlefish, CuttlefishConf}), {ok, RingSize} = rt:rpc_get_env(Node, [{riak_core, ring_creation_size}]), ?assertEqual(8, RingSize), diff --git a/tests/ensemble_util.erl b/tests/ensemble_util.erl index d6f79145b..f206df9de 100644 --- a/tests/ensemble_util.erl +++ b/tests/ensemble_util.erl @@ -26,16 +26,16 @@ -include_lib("eunit/include/eunit.hrl"). build_cluster(Num, Config, NVal) -> - Nodes = rt:deploy_nodes(Num, Config), + Nodes = rt_cluster:deploy_nodes(Num, Config), Node = hd(Nodes), - rt:join_cluster(Nodes), + rt_cluster:join_cluster(Nodes), ensemble_util:wait_until_cluster(Nodes), ensemble_util:wait_for_membership(Node), ensemble_util:wait_until_stable(Node, NVal), Nodes. 
build_cluster_without_quorum(Num, Config) -> - Nodes = rt:deploy_nodes(Num, Config), + Nodes = rt_cluster:deploy_nodes(Num, Config), SetupLogCaptureFun = fun(Node) -> rt:setup_log_capture(Node) end, @@ -43,7 +43,7 @@ build_cluster_without_quorum(Num, Config) -> Node = hd(Nodes), ok = rpc:call(Node, riak_ensemble_manager, enable, []), _ = rpc:call(Node, riak_core_ring_manager, force_update, []), - rt:join_cluster(Nodes), + rt_cluster:join_cluster(Nodes), ensemble_util:wait_until_cluster(Nodes), ensemble_util:wait_for_membership(Node), Nodes. diff --git a/tests/gh_riak_core_154.erl b/tests/gh_riak_core_154.erl index ff722a483..70882e5dc 100644 --- a/tests/gh_riak_core_154.erl +++ b/tests/gh_riak_core_154.erl @@ -28,7 +28,7 @@ confirm() -> %% Increase handoff concurrency on nodes NewConfig = [{riak_core, [{handoff_concurrency, 1024}]}], - Nodes = rt:build_cluster(2, NewConfig), + Nodes = rt_cluster:build_cluster(2, NewConfig), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), [Node1, Node2] = Nodes, diff --git a/tests/gh_riak_core_155.erl b/tests/gh_riak_core_155.erl index 5bd4bb17d..fb53b69ce 100644 --- a/tests/gh_riak_core_155.erl +++ b/tests/gh_riak_core_155.erl @@ -24,7 +24,7 @@ -include_lib("eunit/include/eunit.hrl"). confirm() -> - [Node] = rt:build_cluster(1), + [Node] = rt_cluster:build_cluster(1), %% Generate a valid preflist for our get requests rpc:call(Node, riak_core, wait_for_service, [riak_kv]), diff --git a/tests/gh_riak_core_176.erl b/tests/gh_riak_core_176.erl index 3f7b351e1..959d14e68 100644 --- a/tests/gh_riak_core_176.erl +++ b/tests/gh_riak_core_176.erl @@ -23,7 +23,7 @@ -include_lib("eunit/include/eunit.hrl"). 
confirm() -> - Nodes = rt:deploy_nodes(3), + Nodes = rt_cluster:deploy_nodes(3), [Node1, Node2, Node3] = Nodes, Nodes12 = [Node1, Node2], Nodes123 = Nodes, diff --git a/tests/gh_riak_kv_765.erl b/tests/gh_riak_kv_765.erl index a8f070cdd..0c5c880cb 100644 --- a/tests/gh_riak_kv_765.erl +++ b/tests/gh_riak_kv_765.erl @@ -41,7 +41,7 @@ confirm() -> check_empty_build() -> Config = [{riak_core, [{vnode_management_timer, 1000}, {ring_creation_size, 4}]}], - Nodes = rt:build_cluster(1, Config), + Nodes = rt_cluster:build_cluster(1, Config), Node = hd(Nodes), timer:sleep(2000), Self = self(), @@ -56,7 +56,7 @@ check_empty_build() -> lager:info("Failed. Empty AAE trees were not built instantly"), fail end, - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), Result. check_throttle_and_expiration() -> @@ -66,7 +66,7 @@ check_throttle_and_expiration() -> {anti_entropy, {off, []}}]}, {riak_core, [{vnode_management_timer, 1000}, {ring_creation_size, 4}]}], - Nodes = rt:build_cluster(1, Config), + Nodes = rt_cluster:build_cluster(1, Config), Node = hd(Nodes), timer:sleep(2000), diff --git a/tests/http_bucket_types.erl b/tests/http_bucket_types.erl index 0c24ab278..74d348cb2 100644 --- a/tests/http_bucket_types.erl +++ b/tests/http_bucket_types.erl @@ -9,7 +9,7 @@ confirm() -> application:start(ibrowse), lager:info("Deploy some nodes"), - Nodes = rt:build_cluster(4, [], [ + Nodes = rt_cluster:build_cluster(4, [], [ {riak_core, [{default_bucket_props, [{n_val, 2}]}]}]), Node = hd(Nodes), diff --git a/tests/http_security.erl b/tests/http_security.erl index c48e12771..d7b4dcc6b 100644 --- a/tests/http_security.erl +++ b/tests/http_security.erl @@ -43,7 +43,7 @@ confirm() -> {enabled, true} ]} ], - Nodes = rt:build_cluster(4, Conf), + Nodes = rt_cluster:build_cluster(4, Conf), Node = hd(Nodes), %% enable security on the cluster ok = rpc:call(Node, riak_core_console, security_enable, [[]]), diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index 31dd3bd19..508174160 
100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -32,7 +32,7 @@ confirm() -> JMXPort = 41111, Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - Nodes = rt:deploy_nodes(1, Config), + Nodes = rt_cluster:deploy_nodes(1, Config), [Node1] = Nodes, ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), @@ -108,7 +108,7 @@ confirm() -> test_supervision() -> JMXPort = 80, Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - [Node|[]] = rt:deploy_nodes(1, Config), + [Node|[]] = rt_cluster:deploy_nodes(1, Config), timer:sleep(20000), case net_adm:ping(Node) of pang -> @@ -160,7 +160,7 @@ test_application_stop() -> lager:info("Testing application:stop()"), JMXPort = 41111, Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], - Nodes = rt:deploy_nodes(1, Config), + Nodes = rt_cluster:deploy_nodes(1, Config), [Node] = Nodes, ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), diff --git a/tests/loaded_upgrade.erl b/tests/loaded_upgrade.erl index 9dd09fbdc..50e46e1ab 100644 --- a/tests/loaded_upgrade.erl +++ b/tests/loaded_upgrade.erl @@ -43,7 +43,7 @@ confirm() -> Config = [{riak_search, [{enabled, true}]}, {riak_pipe, [{worker_limit, 200}]}], NumNodes = 4, Vsns = [{OldVsn, Config} || _ <- lists:seq(1,NumNodes)], - Nodes = rt:build_cluster(Vsns), + Nodes = rt_cluster:build_cluster(Vsns), seed_cluster(Nodes), diff --git a/tests/mapred_basic_compat.erl b/tests/mapred_basic_compat.erl index 0aa0f202f..6b827b7fa 100644 --- a/tests/mapred_basic_compat.erl +++ b/tests/mapred_basic_compat.erl @@ -41,7 +41,7 @@ -define(BUCKET_TYPE, <<"mytype">>). confirm() -> - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), [Node1|_] = Nodes, %% create a new type diff --git a/tests/mapred_buffer_prereduce.erl b/tests/mapred_buffer_prereduce.erl index 9356dbc34..d6b6cbac7 100644 --- a/tests/mapred_buffer_prereduce.erl +++ b/tests/mapred_buffer_prereduce.erl @@ -35,7 +35,7 @@ -define(NUM_INTS, 1000). 
confirm() -> - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), load_test_data(Nodes), diff --git a/tests/mapred_dead_pipe.erl b/tests/mapred_dead_pipe.erl index b60728d46..53abb7f4d 100644 --- a/tests/mapred_dead_pipe.erl +++ b/tests/mapred_dead_pipe.erl @@ -44,7 +44,7 @@ }">>). confirm() -> - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), %% to pick up fake_builder/1 rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/mapred_http_errors.erl b/tests/mapred_http_errors.erl index 5d31f8995..aa07c2f4d 100644 --- a/tests/mapred_http_errors.erl +++ b/tests/mapred_http_errors.erl @@ -36,7 +36,7 @@ map_never_notfound(Object, _, _) when Object /= {error, notfound} -> [ok]. confirm() -> - Nodes = rt:build_cluster(1), + Nodes = rt_cluster:build_cluster(1), rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/mapred_javascript.erl b/tests/mapred_javascript.erl index 5f8d8d502..fedfadf5c 100644 --- a/tests/mapred_javascript.erl +++ b/tests/mapred_javascript.erl @@ -43,7 +43,7 @@ }">>). 
confirm() -> - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), load_test_data(Nodes), diff --git a/tests/mapred_notfound_failover.erl b/tests/mapred_notfound_failover.erl index 8c8e2e424..9076a1c30 100644 --- a/tests/mapred_notfound_failover.erl +++ b/tests/mapred_notfound_failover.erl @@ -41,7 +41,7 @@ confirm() -> %% notfound by killing a vnode rt:set_backend(memory), - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), %% for our custom reduce phase rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/mapred_search_switch.erl b/tests/mapred_search_switch.erl index 85434e20f..1b9bbfc8c 100644 --- a/tests/mapred_search_switch.erl +++ b/tests/mapred_search_switch.erl @@ -67,7 +67,7 @@ setup_test_env() -> %% must enable both RS and YZ at startup to get test data indexed; %% nothing extra would be tested by using multiple nodes, so just %% deploy one to make the test run faster - Nodes = rt:deploy_nodes(1, [{riak_search, [{enabled, true}]}, + Nodes = rt_cluster:deploy_nodes(1, [{riak_search, [{enabled, true}]}, {yokozuna, [{enabled, true}]}]), ok = rt:wait_until_nodes_ready(Nodes), ok = rt:wait_for_cluster_service(Nodes, riak_search), diff --git a/tests/mapred_verify_rt.erl b/tests/mapred_verify_rt.erl index c09a63e5b..b7f4e9b98 100644 --- a/tests/mapred_verify_rt.erl +++ b/tests/mapred_verify_rt.erl @@ -29,7 +29,7 @@ confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), %% @todo longer term fix is probably one or more of: diff --git a/tests/overload.erl b/tests/overload.erl index 320af76f3..63f6a4040 100644 --- a/tests/overload.erl +++ b/tests/overload.erl @@ -33,7 +33,7 @@ confirm() -> {riak_kv, [{fsm_limit, undefined}, {storage_backend, riak_kv_memory_backend}, {anti_entropy, {off, []}}]}], - Nodes = rt:build_cluster(2, Config), + Nodes = rt_cluster:build_cluster(2, Config), [_Node1, Node2] = Nodes, Ring = 
rt:get_ring(Node2), diff --git a/tests/partition_repair.erl b/tests/partition_repair.erl index 62ba5806c..757aabf85 100644 --- a/tests/partition_repair.erl +++ b/tests/partition_repair.erl @@ -72,7 +72,7 @@ confirm() -> %% [{"./log/console.log",debug,10485760,"$D0",5}]}]}]} ], - Nodes = rt:build_cluster(NumNodes, Conf), + Nodes = rt_cluster:build_cluster(NumNodes, Conf), case NVal of undefined -> @@ -120,7 +120,7 @@ kill_repair_verify({Partition, Node}, DataSuffix, Service) -> %% kill the partition data Path = DataSuffix ++ "/" ++ integer_to_list(Partition), lager:info("Killing data for ~p on ~p at ~s", [Partition, Node, Path]), - rt:clean_data_dir([Node], Path), + rt_cluster:clean_data_dir([Node], Path), %% force restart of vnode since some data is kept in memory lager:info("Restarting ~p vnode for ~p on ~p", [Service, Partition, Node]), diff --git a/tests/pb_cipher_suites.erl b/tests/pb_cipher_suites.erl index 59f6a57ec..0b98c4701 100644 --- a/tests/pb_cipher_suites.erl +++ b/tests/pb_cipher_suites.erl @@ -46,7 +46,7 @@ confirm() -> ]} ], - Nodes = rt:build_cluster(4, Conf), + Nodes = rt_cluster:build_cluster(4, Conf), Node = hd(Nodes), %% enable security on the cluster ok = rpc:call(Node, riak_core_console, security_enable, [[]]), diff --git a/tests/pb_security.erl b/tests/pb_security.erl index d1b780613..2562615f4 100644 --- a/tests/pb_security.erl +++ b/tests/pb_security.erl @@ -69,7 +69,7 @@ confirm() -> _ -> true end, - Nodes = rt:build_cluster(4, Conf), + Nodes = rt_cluster:build_cluster(4, Conf), Node = hd(Nodes), %% enable security on the cluster ok = rpc:call(Node, riak_core_console, security_enable, [[]]), diff --git a/tests/pipe_verify_basics.erl b/tests/pipe_verify_basics.erl index 8859074cf..1e1d4e064 100644 --- a/tests/pipe_verify_basics.erl +++ b/tests/pipe_verify_basics.erl @@ -40,7 +40,7 @@ confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), 
rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/pipe_verify_examples.erl b/tests/pipe_verify_examples.erl index a45df7001..01f493c2a 100644 --- a/tests/pipe_verify_examples.erl +++ b/tests/pipe_verify_examples.erl @@ -29,7 +29,7 @@ confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), verify_example(Nodes), verify_example_transform(Nodes), diff --git a/tests/pipe_verify_exceptions.erl b/tests/pipe_verify_exceptions.erl index da83ae9ff..a64fca555 100644 --- a/tests/pipe_verify_exceptions.erl +++ b/tests/pipe_verify_exceptions.erl @@ -45,7 +45,7 @@ %% @doc riak_test callback confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes), diff --git a/tests/pipe_verify_handoff.erl b/tests/pipe_verify_handoff.erl index ac578e5a8..f0f3f2251 100644 --- a/tests/pipe_verify_handoff.erl +++ b/tests/pipe_verify_handoff.erl @@ -62,7 +62,7 @@ confirm() -> lager:info("Start ~b nodes", [?NODE_COUNT]), NodeDefs = lists:duplicate(?NODE_COUNT, {current, default}), Services = [riak_pipe], - [Primary,Secondary] = Nodes = rt:deploy_nodes(NodeDefs, Services), + [Primary,Secondary] = Nodes = rt_cluster:deploy_nodes(NodeDefs, Services), %% Ensure each node owns 100% of it's own ring [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes], diff --git a/tests/pipe_verify_handoff_blocking.erl b/tests/pipe_verify_handoff_blocking.erl index 24fa19828..e8a454c54 100644 --- a/tests/pipe_verify_handoff_blocking.erl +++ b/tests/pipe_verify_handoff_blocking.erl @@ -70,7 +70,7 @@ confirm() -> lager:info("Start ~b nodes", [?NODE_COUNT]), NodeDefs = lists:duplicate(?NODE_COUNT, {current, default}), Services = [riak_pipe], - [Primary,Secondary] = Nodes = rt:deploy_nodes(NodeDefs, Services), + [Primary,Secondary] = Nodes = 
rt_cluster:deploy_nodes(NodeDefs, Services), %% Ensure each node owns 100% of it's own ring [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes], diff --git a/tests/pipe_verify_restart_input_forwarding.erl b/tests/pipe_verify_restart_input_forwarding.erl index a9ae5f561..4b3fedaaa 100644 --- a/tests/pipe_verify_restart_input_forwarding.erl +++ b/tests/pipe_verify_restart_input_forwarding.erl @@ -52,7 +52,7 @@ %% @doc riak_test callback confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), rt:load_modules_on_nodes([?MODULE, rt_pipe], Nodes), diff --git a/tests/pipe_verify_sink_types.erl b/tests/pipe_verify_sink_types.erl index 52c433b6f..55f153720 100644 --- a/tests/pipe_verify_sink_types.erl +++ b/tests/pipe_verify_sink_types.erl @@ -39,7 +39,7 @@ %% @doc riak_test callback confirm() -> lager:info("Build ~b node cluster", [?NODE_COUNT]), - Nodes = rt:build_cluster(?NODE_COUNT), + Nodes = rt_cluster:build_cluster(?NODE_COUNT), verify_raw(Nodes), verify_fsm(Nodes), diff --git a/tests/post_generate_key.erl b/tests/post_generate_key.erl index b8197caa9..de77942b9 100644 --- a/tests/post_generate_key.erl +++ b/tests/post_generate_key.erl @@ -25,7 +25,7 @@ -include_lib("eunit/include/eunit.hrl"). 
confirm() -> - Nodes = rt:build_cluster(1), + Nodes = rt_cluster:build_cluster(1), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), [Base|_] = rt:http_url(Nodes), diff --git a/tests/pr_pw.erl b/tests/pr_pw.erl index 15b5a59a7..07b31ec30 100644 --- a/tests/pr_pw.erl +++ b/tests/pr_pw.erl @@ -8,7 +8,7 @@ confirm() -> application:start(inets), lager:info("Deploy some nodes"), - Nodes = rt:build_cluster(4), + Nodes = rt_cluster:build_cluster(4), %% calculate the preflist for foo/bar {ok, Ring} = rpc:call(hd(Nodes), riak_core_ring_manager, get_my_ring, []), diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index dc0e25b66..fdb655df0 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -114,7 +114,7 @@ simple_test() -> %% intercepts are removed. validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS), - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), pass. @@ -297,7 +297,7 @@ bidirectional_test() -> validate_completed_fullsync(LeaderB, AFirst, "A", ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS), %% Clean. - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), pass. @@ -389,7 +389,7 @@ difference_test() -> [{timeout, 4000}]), ?assertEqual([<<"baz">>, <<"baz2">>], lists:sort(riakc_obj:get_values(O2))), - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), pass. @@ -458,7 +458,7 @@ deadlock_test() -> lager:info("Status result: ~p", [Result]), ?assertNotEqual({badrpc, timeout}, Result), - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), pass. 
diff --git a/tests/replication/repl_bucket_types.erl b/tests/replication/repl_bucket_types.erl index b643bfd5b..17e3f3c18 100644 --- a/tests/replication/repl_bucket_types.erl +++ b/tests/replication/repl_bucket_types.erl @@ -57,7 +57,7 @@ cleanup({ClusterNodes, _Types, PBA, PBB}, CleanCluster) -> {_, _, ANodes, BNodes} = ClusterNodes, case CleanCluster of true -> - rt:clean_cluster(ANodes ++ BNodes); + rt_cluster:clean_cluster(ANodes ++ BNodes); false -> ok end. @@ -339,10 +339,10 @@ cluster_conf() -> ]. deploy_nodes(NumNodes, current) -> - rt:deploy_nodes(NumNodes, cluster_conf()); + rt_cluster:deploy_nodes(NumNodes, cluster_conf()); deploy_nodes(_, mixed) -> Conf = cluster_conf(), - rt:deploy_nodes([{current, Conf}, {previous, Conf}]). + rt_cluster:deploy_nodes([{current, Conf}, {previous, Conf}]). %% @doc Create two clusters of 1 node each and connect them for replication: %% Cluster "A" -> cluster "B" diff --git a/tests/replication/repl_cancel_fullsync.erl b/tests/replication/repl_cancel_fullsync.erl index 435d6927e..ff47d2d76 100644 --- a/tests/replication/repl_cancel_fullsync.erl +++ b/tests/replication/repl_cancel_fullsync.erl @@ -35,7 +35,7 @@ confirm() -> rt_config:set_advanced_conf(all, ?CONF(5)), - Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), + Nodes = [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), @@ -138,7 +138,7 @@ confirm() -> lager:info("Fullsync Complete"), rt:log_to_nodes(Nodes, "Test completed."), - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), pass. 
diff --git a/tests/replication/repl_consistent_object_filter.erl b/tests/replication/repl_consistent_object_filter.erl index 73b45b7cc..8766322d8 100644 --- a/tests/replication/repl_consistent_object_filter.erl +++ b/tests/replication/repl_consistent_object_filter.erl @@ -103,7 +103,7 @@ make_clusters() -> ]} ], - Nodes = rt:deploy_nodes(NumNodes, Conf), + Nodes = rt_cluster:deploy_nodes(NumNodes, Conf), {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), diff --git a/tests/replication/repl_fs_bench.erl b/tests/replication/repl_fs_bench.erl index 7736fcb6c..9d783b752 100644 --- a/tests/replication/repl_fs_bench.erl +++ b/tests/replication/repl_fs_bench.erl @@ -62,7 +62,7 @@ confirm() -> fullsync_test(Strategy, Latency) -> rt_config:set_advanced_conf(all, ?CONF(Strategy)), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), AFirst = hd(ANodes), BFirst = hd(BNodes), @@ -147,7 +147,7 @@ fullsync_test(Strategy, Latency) -> start_and_wait_until_fullsync_complete, [LeaderA]), - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), {EmptyTime, FullTime, DiffTime, NoneTime}. 
diff --git a/tests/replication/repl_fs_stat_caching.erl b/tests/replication/repl_fs_stat_caching.erl index 7a642c0fb..6ecb655f0 100644 --- a/tests/replication/repl_fs_stat_caching.erl +++ b/tests/replication/repl_fs_stat_caching.erl @@ -38,7 +38,7 @@ setup() -> NodeCount = rt_config:get(num_nodes, 6), lager:info("Deploy ~p nodes", [NodeCount]), - Nodes = rt:deploy_nodes(NodeCount, cluster_conf()), + Nodes = rt_cluster:deploy_nodes(NodeCount, cluster_conf()), SplitSize = NodeCount div 2, {SourceNodes, SinkNodes} = lists:split(SplitSize, Nodes), diff --git a/tests/replication/repl_location_failures.erl b/tests/replication/repl_location_failures.erl index 6fe89d075..7cc5ce186 100644 --- a/tests/replication/repl_location_failures.erl +++ b/tests/replication/repl_location_failures.erl @@ -36,7 +36,7 @@ confirm() -> rt_config:set_advanced_conf(all, ?CONF(5)), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), @@ -99,7 +99,7 @@ confirm() -> repl_util:validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS, ?TEST_BUCKET), - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), pass. 
diff --git a/tests/replication/repl_reduced.erl b/tests/replication/repl_reduced.erl index a96c5e888..50af2cf5c 100644 --- a/tests/replication/repl_reduced.erl +++ b/tests/replication/repl_reduced.erl @@ -19,12 +19,12 @@ confirm() -> toggle_enabled_test_() -> {setup, fun() -> - Nodes = rt:deploy_nodes(3, conf()), + Nodes = rt_cluster:deploy_nodes(3, conf()), repl_util:make_cluster(Nodes), Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -78,7 +78,7 @@ data_push() -> data_push_test_() -> {timeout, rt_cascading:timeout(1000000000000000), {setup, fun() -> - Nodes = rt:deploy_nodes(6, conf()), + Nodes = rt_cluster:deploy_nodes(6, conf()), {[N1 | _] = C123, [N4 | _] = C456} = lists:split(3, Nodes), repl_util:make_cluster(C123), repl_util:name_cluster(N1, "c123"), @@ -92,9 +92,9 @@ data_push_test_() -> fun(State) -> case rt_config:config_or_os_env(skip_teardown, false) of "false" -> - rt:clean_cluster(State#data_push_test.nodes); + rt_cluster:clean_cluster(State#data_push_test.nodes); false -> - rt:clean_cluster(State#data_push_test.nodes); + rt_cluster:clean_cluster(State#data_push_test.nodes); _ -> ok end @@ -211,7 +211,7 @@ read_repair_interaction() -> read_repair_interaction_test_() -> {timeout, rt_cascading:timeout(100000), {setup, fun() -> - Nodes = rt:deploy_nodes(6, conf()), + Nodes = rt_cluster:deploy_nodes(6, conf()), {[N1 | _] = C123, [N4 | _] = C456} = lists:split(3, Nodes), repl_util:make_cluster(C123), repl_util:name_cluster(N1, "c123"), @@ -225,9 +225,9 @@ read_repair_interaction_test_() -> fun(State) -> case rt_config:config_or_os_env(skip_teardown, false) of "false" -> - rt:clean_cluster(State#data_push_test.nodes); + rt_cluster:clean_cluster(State#data_push_test.nodes); false -> - rt:clean_cluster(State#data_push_test.nodes); + rt_cluster:clean_cluster(State#data_push_test.nodes); _ -> ok end diff --git a/tests/replication/repl_rt_cascading_rtq.erl b/tests/replication/repl_rt_cascading_rtq.erl 
index af9960889..64939bae0 100644 --- a/tests/replication/repl_rt_cascading_rtq.erl +++ b/tests/replication/repl_rt_cascading_rtq.erl @@ -153,9 +153,9 @@ cluster_conf(_CascadingWrites) -> ]. deploy_nodes(NumNodes, true) -> - rt:deploy_nodes(NumNodes, cluster_conf(always)); + rt_cluster:deploy_nodes(NumNodes, cluster_conf(always)); deploy_nodes(NumNodes, false) -> - rt:deploy_nodes(NumNodes, cluster_conf(never)). + rt_cluster:deploy_nodes(NumNodes, cluster_conf(never)). %% @doc Turn on Realtime replication on the cluster lead by LeaderA. %% The clusters must already have been named and connected. diff --git a/tests/replication/repl_rt_heartbeat.erl b/tests/replication/repl_rt_heartbeat.erl index 94bb203c6..8cf4a3614 100644 --- a/tests/replication/repl_rt_heartbeat.erl +++ b/tests/replication/repl_rt_heartbeat.erl @@ -174,7 +174,7 @@ make_connected_clusters() -> ]} ], - Nodes = rt:deploy_nodes(NumNodes, Conf), + Nodes = rt_cluster:deploy_nodes(NumNodes, Conf), {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("ANodes: ~p", [ANodes]), diff --git a/tests/replication/repl_rt_overload.erl b/tests/replication/repl_rt_overload.erl index 2fafdec23..e425e02b3 100644 --- a/tests/replication/repl_rt_overload.erl +++ b/tests/replication/repl_rt_overload.erl @@ -128,7 +128,7 @@ make_connected_clusters() -> ]} ], - Nodes = rt:deploy_nodes(NumNodes, Conf), + Nodes = rt_cluster:deploy_nodes(NumNodes, Conf), {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), diff --git a/tests/replication/repl_rt_pending.erl b/tests/replication/repl_rt_pending.erl index 2c3c6346c..e393e979a 100644 --- a/tests/replication/repl_rt_pending.erl +++ b/tests/replication/repl_rt_pending.erl @@ -174,7 +174,7 @@ make_connected_clusters() -> ]} ], - Nodes = rt:deploy_nodes(NumNodes, Conf), + Nodes = rt_cluster:deploy_nodes(NumNodes, Conf), {ANodes, BNodes} = lists:split(ClusterASize, Nodes), lager:info("ANodes: ~p", 
[ANodes]), diff --git a/tests/replication/replication.erl b/tests/replication/replication.erl index 5fa70cbed..c31ae1507 100644 --- a/tests/replication/replication.erl +++ b/tests/replication/replication.erl @@ -22,7 +22,7 @@ confirm() -> ]} ], rt_config:set_advanced_conf(all, Conf), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), replication(ANodes, BNodes, false), pass. diff --git a/tests/replication/replication2_connections.erl b/tests/replication/replication2_connections.erl index da81a29fc..a148aa727 100644 --- a/tests/replication/replication2_connections.erl +++ b/tests/replication/replication2_connections.erl @@ -46,7 +46,7 @@ simple_test() -> rt_config:set_advanced_conf(all, Conf), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), @@ -89,8 +89,8 @@ simple_test() -> [verify_connectivity(Node, "B") || Node <- ANodes], [verify_connectivity(Node, "A") || Node <- BNodes], - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), pass. @@ -108,7 +108,7 @@ disconnect_test() -> rt_config:set_advanced_conf(all, Conf), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), @@ -151,8 +151,8 @@ disconnect_test() -> lager:info("Verifying disconnect from B to A."), [verify_disconnect(Node, "A") || Node <- BNodes], - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), pass. 
@@ -175,7 +175,7 @@ error_cleanup_test() -> rt_config:set_advanced_conf(all, Conf), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), @@ -227,8 +227,8 @@ error_cleanup_test() -> lager:info("Verifying connection from A to B"), [verify_connectivity(Node, "B") || Node <- ANodes], - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), pass. diff --git a/tests/replication/replication2_console_tests.erl b/tests/replication/replication2_console_tests.erl index 868096c1b..c090bf9a1 100644 --- a/tests/replication/replication2_console_tests.erl +++ b/tests/replication/replication2_console_tests.erl @@ -48,7 +48,7 @@ confirm() -> %% Deploy a node to test against lager:info("Deploy node to test riak-repl command line"), - [Node] = rt:deploy_nodes(1), + [Node] = rt_cluster:deploy_nodes(1), ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), rt_intercept:add(Node, {riak_repl_console, diff --git a/tests/replication/replication2_fsschedule.erl b/tests/replication/replication2_fsschedule.erl index 9e12b7752..a320a9291 100644 --- a/tests/replication/replication2_fsschedule.erl +++ b/tests/replication/replication2_fsschedule.erl @@ -99,7 +99,7 @@ test_multiple_schedules() -> lager:info("Waiting for fullsyncs"), wait_until_fullsyncs(LeaderA, "B", 5), wait_until_fullsyncs(LeaderA, "C", 5), - rt:clean_cluster(AllNodes), + rt_cluster:clean_cluster(AllNodes), pass. test_single_schedule() -> @@ -118,7 +118,7 @@ test_single_schedule() -> lager:info("Waiting for fullsyncs"), wait_until_fullsyncs(LeaderA, "B", 10), wait_until_fullsyncs(LeaderA, "C", 10), - rt:clean_cluster(AllNodes), + rt_cluster:clean_cluster(AllNodes), pass. 
test_mixed_12_13() -> @@ -152,7 +152,7 @@ test_mixed_12_13() -> wait_until_fullsyncs(LeaderA, "B", 3), wait_until_fullsyncs(LeaderA, "C", 3), wait_until_12_fs_complete(LeaderA, 9), - rt:clean_cluster(AllNodes), + rt_cluster:clean_cluster(AllNodes), pass. diff --git a/tests/replication/replication2_pg.erl b/tests/replication/replication2_pg.erl index 4c197d378..95f71e1a7 100644 --- a/tests/replication/replication2_pg.erl +++ b/tests/replication/replication2_pg.erl @@ -69,7 +69,7 @@ setup_repl_clusters(Conf, SSL) -> rt_config:set_advanced_conf(all, Conf), - Nodes = [ANodes, BNodes, CNodes] = rt:build_clusters([2, 2, 2]), + Nodes = [ANodes, BNodes, CNodes] = rt_cluster:build_clusters([2, 2, 2]), AFirst = hd(ANodes), BFirst = hd(BNodes), diff --git a/tests/replication/replication2_rt_sink_connection.erl b/tests/replication/replication2_rt_sink_connection.erl index c666bcc02..c9348fd7c 100644 --- a/tests/replication/replication2_rt_sink_connection.erl +++ b/tests/replication/replication2_rt_sink_connection.erl @@ -43,7 +43,7 @@ confirm() -> ]} ], - Nodes = rt:deploy_nodes(NumNodes, Conf), + Nodes = rt_cluster:deploy_nodes(NumNodes, Conf), {ANodes, Rest} = lists:split(2, Nodes), {BNodes, CNodes} = lists:split(2, Rest), diff --git a/tests/replication/replication2_ssl.erl b/tests/replication/replication2_ssl.erl index e8a1bbf0e..be627574a 100644 --- a/tests/replication/replication2_ssl.erl +++ b/tests/replication/replication2_ssl.erl @@ -190,7 +190,7 @@ confirm() -> lager:info("===testing basic connectivity"), - [Node1, Node2] = rt:deploy_nodes(2, BaseConf), + [Node1, Node2] = rt_cluster:deploy_nodes(2, BaseConf), repl_util:name_cluster(Node1, "A"), repl_util:name_cluster(Node2, "B"), @@ -264,7 +264,7 @@ confirm() -> lager:info("Re-deploying 6 nodes"), - Nodes = rt:deploy_nodes(6, BaseConf), + Nodes = rt_cluster:deploy_nodes(6, BaseConf), [rt:wait_until_pingable(N) || N <- Nodes], diff --git a/tests/replication/replication2_upgrade.erl 
b/tests/replication/replication2_upgrade.erl index 59d74baa5..6f1ac3067 100644 --- a/tests/replication/replication2_upgrade.erl +++ b/tests/replication/replication2_upgrade.erl @@ -32,7 +32,7 @@ confirm() -> NodeConfig = [{FromVersion, Conf} || _ <- lists:seq(1, NumNodes)], - Nodes = rt:deploy_nodes(NodeConfig), + Nodes = rt_cluster:deploy_nodes(NodeConfig), NodeUpgrades = case UpgradeOrder of "forwards" -> diff --git a/tests/replication/replication_object_reformat.erl b/tests/replication/replication_object_reformat.erl index 6ee5e9d68..1163d00a1 100644 --- a/tests/replication/replication_object_reformat.erl +++ b/tests/replication/replication_object_reformat.erl @@ -159,14 +159,14 @@ verify_replication(AVersion, BVersion, Start, End, Realtime) -> ok end, - rt:clean_cluster(lists:flatten(Nodes)). + rt_cluster:clean_cluster(lists:flatten(Nodes)). %% @doc Configure two clusters and set up replication between them, %% return the node list of each cluster. configure_clusters(AVersion, BVersion, Realtime) -> rt_config:set_advanced_conf(all, ?CONF(infinity)), - Nodes = [ANodes, BNodes] = rt:build_clusters([3, 3]), + Nodes = [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), lager:info("ANodes: ~p", [ANodes]), lager:info("BNodes: ~p", [BNodes]), diff --git a/tests/replication/replication_ssl.erl b/tests/replication/replication_ssl.erl index ff18c7203..800c706a9 100644 --- a/tests/replication/replication_ssl.erl +++ b/tests/replication/replication_ssl.erl @@ -163,7 +163,7 @@ confirm() -> lager:info("===testing basic connectivity"), - [Node1, Node2] = rt:deploy_nodes(2, BaseConf), + [Node1, Node2] = rt_cluster:deploy_nodes(2, BaseConf), Listeners = replication:add_listeners([Node1]), replication:verify_listeners(Listeners), @@ -225,7 +225,7 @@ confirm() -> lager:info("Re-deploying 6 nodes"), - Nodes = rt:deploy_nodes(6, BaseConf), + Nodes = rt_cluster:deploy_nodes(6, BaseConf), [rt:wait_until_pingable(N) || N <- Nodes], diff --git 
a/tests/replication/replication_stats.erl b/tests/replication/replication_stats.erl index 98734e1af..25294eb43 100644 --- a/tests/replication/replication_stats.erl +++ b/tests/replication/replication_stats.erl @@ -41,7 +41,7 @@ confirm() -> fullsync_enabled_and_started() -> rt_config:set_advanced_conf(all, ?CONF), - [ANodes, BNodes] = rt:build_clusters([3, 3]), + [ANodes, BNodes] = rt_cluster:build_clusters([3, 3]), AFirst = hd(ANodes), BFirst = hd(BNodes), @@ -99,7 +99,7 @@ fullsync_enabled_and_started() -> fail end, - rt:clean_cluster(ANodes), - rt:clean_cluster(BNodes), + rt_cluster:clean_cluster(ANodes), + rt_cluster:clean_cluster(BNodes), Result. diff --git a/tests/replication/replication_upgrade.erl b/tests/replication/replication_upgrade.erl index 0e24271f2..e4bb26169 100644 --- a/tests/replication/replication_upgrade.erl +++ b/tests/replication/replication_upgrade.erl @@ -25,7 +25,7 @@ confirm() -> NodeConfig = [{FromVersion, Conf} || _ <- lists:seq(1, NumNodes)], - Nodes = rt:deploy_nodes(NodeConfig), + Nodes = rt_cluster:deploy_nodes(NodeConfig), NodeUpgrades = case UpgradeOrder of "forwards" -> diff --git a/tests/replication/rt_cascading.erl b/tests/replication/rt_cascading.erl index a47e29b51..c8827c6b3 100644 --- a/tests/replication/rt_cascading.erl +++ b/tests/replication/rt_cascading.erl @@ -64,7 +64,7 @@ simple_test_() -> % +-----------+ +--------+ +-----+ {timeout, timeout(90), {setup, fun() -> Conf = conf(), - [BeginNode, MiddleNode, EndNode] = Nodes = rt:deploy_nodes(3, Conf), + [BeginNode, MiddleNode, EndNode] = Nodes = rt_cluster:deploy_nodes(3, Conf), repl_util:make_cluster([BeginNode]), repl_util:make_cluster([MiddleNode]), repl_util:make_cluster([EndNode]), @@ -78,7 +78,7 @@ simple_test_() -> fun(State) -> Nodes = [State#simple_state.beginning, State#simple_state.middle, State#simple_state.ending], - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(State) -> [ @@ -161,7 +161,7 @@ big_circle_test_() -> % +---+ {timeout, 
timeout(130), {setup, fun() -> Conf = conf(), - Nodes = rt:deploy_nodes(6, Conf), + Nodes = rt_cluster:deploy_nodes(6, Conf), [repl_util:make_cluster([N]) || N <- Nodes], [repl_util:wait_until_is_leader(N) || N <- Nodes], Names = ["1", "2", "3", "4", "5", "6"], @@ -181,7 +181,7 @@ big_circle_test_() -> Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -262,7 +262,7 @@ circle_test_() -> % +-------+ +-----+ {timeout, timeout(30), {setup, fun() -> Conf = conf(), - [One, Two, Three] = Nodes = rt:deploy_nodes(3, Conf), + [One, Two, Three] = Nodes = rt_cluster:deploy_nodes(3, Conf), [repl_util:make_cluster([N]) || N <- Nodes], [repl_util:wait_until_is_leader(N) || N <- Nodes], Names = ["one", "two", "three"], @@ -280,7 +280,7 @@ circle_test_() -> Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -333,7 +333,7 @@ pyramid_test_() -> {timeout, timeout(70), {setup, fun() -> Conf = conf(), - [Top, Left, Left2, Right, Right2] = Nodes = rt:deploy_nodes(5, Conf), + [Top, Left, Left2, Right, Right2] = Nodes = rt_cluster:deploy_nodes(5, Conf), [repl_util:make_cluster([N]) || N <- Nodes], [repl_util:wait_until_is_leader(N) || N <- Nodes], Names = ["top", "left", "left2", "right", "right2"], @@ -349,7 +349,7 @@ pyramid_test_() -> Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -387,7 +387,7 @@ diamond_test_() -> % +--------+ {timeout, timeout(180), {setup, fun() -> Conf = conf(), - [Top, MidLeft, MidRight, Bottom] = Nodes = rt:deploy_nodes(4, Conf), + [Top, MidLeft, MidRight, Bottom] = Nodes = rt_cluster:deploy_nodes(4, Conf), [repl_util:make_cluster([N]) || N <- Nodes], Names = ["top", "midleft", "midright", "bottom"], [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], @@ -403,7 +403,7 @@ diamond_test_() -> Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + 
rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -479,7 +479,7 @@ circle_and_spurs_test_() -> % +-----------+ +------+ +------+ +-----------+ {timeout, timeout(170), {setup, fun() -> Conf = conf(), - [North, East, West, NorthSpur, EastSpur, WestSpur] = Nodes = rt:deploy_nodes(6, Conf), + [North, East, West, NorthSpur, EastSpur, WestSpur] = Nodes = rt_cluster:deploy_nodes(6, Conf), [repl_util:make_cluster([N]) || N <- Nodes], Names = ["north", "east", "west", "north_spur", "east_spur", "west_spur"], [repl_util:name_cluster(Node, Name) || {Node, Name} <- lists:zip(Nodes, Names)], @@ -493,7 +493,7 @@ circle_and_spurs_test_() -> Nodes end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun(Nodes) -> [ @@ -580,7 +580,7 @@ mixed_version_clusters_test_dep() -> {timeout, 60000, {setup, fun() -> Conf = conf(), DeployConfs = [{previous, Conf} || _ <- lists:seq(1,6)], - Nodes = rt:deploy_nodes(DeployConfs), + Nodes = rt_cluster:deploy_nodes(DeployConfs), [N1, N2, N3, N4, N5, N6] = Nodes, case rpc:call(N1, application, get_key, [riak_core, vsn]) of % this is meant to test upgrading from early BNW aka @@ -611,7 +611,7 @@ mixed_version_clusters_test_dep() -> {too_old, Ns} -> Ns; _ -> MaybeNodes end, - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun({too_old, _Nodes}) -> []; ([N1, N2, N3, N4, N5, N6] = Nodes) -> [ @@ -793,7 +793,7 @@ new_to_old_test_dep() -> {timeout, timeout(105), {setup, fun() -> Conf = conf(), DeployConfs = [{current, Conf}, {previous, Conf}, {current, Conf}], - [New1, Old2, New3] = Nodes = rt:deploy_nodes(DeployConfs), + [New1, Old2, New3] = Nodes = rt_cluster:deploy_nodes(DeployConfs), case rpc:call(Old2, application, get_key, [riak_core, vsn]) of % this is meant to test upgrading from early BNW aka % Brave New World aka Advanced Repl aka version 3 repl to @@ -817,7 +817,7 @@ new_to_old_test_dep() -> {too_old, Ns} -> Ns; _ -> MaybeNodes end, - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) 
end, fun({too_old, _}) -> []; ([New1, Old2, New3]) -> [ @@ -873,7 +873,7 @@ new_to_old_test_dep() -> ensure_ack_test_() -> {timeout, timeout(130), {setup, fun() -> Conf = conf(), - [LeaderA, LeaderB] = Nodes = rt:deploy_nodes(2, Conf), + [LeaderA, LeaderB] = Nodes = rt_cluster:deploy_nodes(2, Conf), [repl_util:make_cluster([N]) || N <- Nodes], [repl_util:wait_until_is_leader(N) || N <- Nodes], Names = ["A", "B"], @@ -888,7 +888,7 @@ ensure_ack_test_() -> [LeaderA, LeaderB] end, fun(Nodes) -> - rt:clean_cluster(Nodes) + rt_cluster:clean_cluster(Nodes) end, fun([LeaderA, LeaderB] = _Nodes) -> [ @@ -928,7 +928,7 @@ ensure_unacked_and_queue() -> ensure_unacked_and_queue_test_() -> {timeout, timeout(2300), {setup, fun() -> - Nodes = rt:deploy_nodes(6, conf()), + Nodes = rt_cluster:deploy_nodes(6, conf()), {N123, N456} = lists:split(3, Nodes), repl_util:make_cluster(N123), repl_util:make_cluster(N456), @@ -943,8 +943,8 @@ ensure_unacked_and_queue_test_() -> {N123, N456} end, maybe_skip_teardown(fun({N123, N456}) -> - rt:clean_cluster(N123), - rt:clean_cluster(N456) + rt_cluster:clean_cluster(N123), + rt_cluster:clean_cluster(N456) end), fun({N123, N456}) -> [ diff --git a/tests/riak_admin_console_tests.erl b/tests/riak_admin_console_tests.erl index 8084b8424..04bd8ea48 100644 --- a/tests/riak_admin_console_tests.erl +++ b/tests/riak_admin_console_tests.erl @@ -154,7 +154,7 @@ riak_admin_tests(Node) -> confirm() -> %% Deploy a node to test against lager:info("Deploy node to test riak command line"), - [Node] = rt:deploy_nodes(1), + [Node] = rt_cluster:deploy_nodes(1), ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), rt_intercept:add(Node, {riak_core_console, diff --git a/tests/riak_control.erl b/tests/riak_control.erl index 8ad4878b7..661dc093d 100644 --- a/tests/riak_control.erl +++ b/tests/riak_control.erl @@ -46,7 +46,7 @@ verify_upgrade(Vsn) -> lager:info("Verify upgrade from ~p to current.", [Vsn]), lager:info("Building cluster."), - [Nodes] = 
rt:build_clusters([{3, Vsn, ?RC_ENABLE_CFG}]), + [Nodes] = rt_cluster:build_clusters([{3, Vsn, ?RC_ENABLE_CFG}]), lager:info("Verifying all nodes are alive."), verify_alive(Nodes), diff --git a/tests/riak_control_authentication.erl b/tests/riak_control_authentication.erl index a8bbb7f8f..a1fce9f70 100644 --- a/tests/riak_control_authentication.erl +++ b/tests/riak_control_authentication.erl @@ -209,7 +209,7 @@ verify_authentication(current, ?RC_AUTH_USERLIST_CONFIG_NO_FORCE_SSL) -> %% @doc Build a one node cluster. build_singleton_cluster(Vsn, Config) -> - [Nodes] = rt:build_clusters([{1, Vsn, Config}]), + [Nodes] = rt_cluster:build_clusters([{1, Vsn, Config}]), %% Start and stop, wait for riak_kv. %% diff --git a/tests/riak_rex.erl b/tests/riak_rex.erl index 30ace1353..ed0052e0b 100644 --- a/tests/riak_rex.erl +++ b/tests/riak_rex.erl @@ -33,10 +33,10 @@ rex_test(Node) -> deploy_node(NumNodes, current) -> - rt:deploy_nodes(NumNodes, conf()); + rt_cluster:deploy_nodes(NumNodes, conf()); deploy_node(_, mixed) -> Conf = conf(), - rt:deploy_nodes([{current, Conf}, {previous, Conf}]). + rt_cluster:deploy_nodes([{current, Conf}, {previous, Conf}]). 
deploy_node(Type) -> NumNodes = rt_config:get(num_nodes, 1), diff --git a/tests/riaknostic_rt.erl b/tests/riaknostic_rt.erl index 85ed822a5..43fa03449 100644 --- a/tests/riaknostic_rt.erl +++ b/tests/riaknostic_rt.erl @@ -29,7 +29,7 @@ confirm() -> %% Build a small cluster - [Node1, _Node2] = rt:build_cluster(2, []), + [Node1, _Node2] = rt_cluster:build_cluster(2, []), ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), %% Install riaknostic for Riak versions below 1.3.0 diff --git a/tests/rolling_capabilities.erl b/tests/rolling_capabilities.erl index 5b05788b4..e87a9fe72 100644 --- a/tests/rolling_capabilities.erl +++ b/tests/rolling_capabilities.erl @@ -55,7 +55,7 @@ confirm() -> end, lager:info("Deploying Riak ~p cluster", [OldVsn]), - Nodes = rt:build_cluster([OldVsn || _ <- lists:seq(1,Count)]), + Nodes = rt_cluster:build_cluster([OldVsn || _ <- lists:seq(1,Count)]), lists:foldl(fun(Node, Upgraded) -> rt:upgrade(Node, current), Upgraded2 = Upgraded ++ [Node], diff --git a/tests/rt_basic_test.erl b/tests/rt_basic_test.erl index 91881c57f..67aeae122 100644 --- a/tests/rt_basic_test.erl +++ b/tests/rt_basic_test.erl @@ -23,7 +23,7 @@ confirm() -> lager:info("Deploy some nodes"), - Nodes = rt:deploy_nodes(2), + Nodes = rt_cluster:deploy_nodes(2), lager:info("Stop the nodes"), [rt:stop(Node) || Node <- Nodes], pass. 
diff --git a/tests/sibling_explosion.erl b/tests/sibling_explosion.erl index aee706c68..68b8760a9 100644 --- a/tests/sibling_explosion.erl +++ b/tests/sibling_explosion.erl @@ -21,7 +21,7 @@ confirm() -> Conf = [{riak_core, [{default_bucket_props, [{allow_mult, true}, {dvv_enabled, true}]}]}], - [Node1] = rt:deploy_nodes(1, Conf), + [Node1] = rt_cluster:deploy_nodes(1, Conf), N = 100, lager:info("Put new object in ~p via PBC.", [Node1]), diff --git a/tests/test_cluster.erl b/tests/test_cluster.erl index b19d15bb3..165a0a2d7 100644 --- a/tests/test_cluster.erl +++ b/tests/test_cluster.erl @@ -24,6 +24,6 @@ confirm() -> Config = [{riak_search, [{enabled, true}]}], - rt:build_cluster(4, Config), + rt_cluster:build_cluster(4, Config), ?assert(false), fail. diff --git a/tests/verify_2i_aae.erl b/tests/verify_2i_aae.erl index 1ea4c0e2d..39b7709ef 100644 --- a/tests/verify_2i_aae.erl +++ b/tests/verify_2i_aae.erl @@ -31,7 +31,7 @@ -define(N_VAL, 3). confirm() -> - [Node1] = rt:build_cluster(1, + [Node1] = rt_cluster:build_cluster(1, [{riak_kv, [{anti_entropy, {off, []}}, {anti_entropy_build_limit, {100, 500}}, diff --git a/tests/verify_2i_limit.erl b/tests/verify_2i_limit.erl index 944b371d4..66a7e7a9c 100644 --- a/tests/verify_2i_limit.erl +++ b/tests/verify_2i_limit.erl @@ -31,7 +31,7 @@ confirm() -> inets:start(), - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), RiakHttp = rt:httpc(hd(Nodes)), diff --git a/tests/verify_2i_mixed_cluster.erl b/tests/verify_2i_mixed_cluster.erl index 02369948b..95c5d597e 100644 --- a/tests/verify_2i_mixed_cluster.erl +++ b/tests/verify_2i_mixed_cluster.erl @@ -31,7 +31,7 @@ confirm() -> OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), Nodes = [CurrentNode, OldNode1, _] = - rt:build_cluster([{current, + rt_cluster:build_cluster([{current, [{riak_kv, [{anti_entropy, {off, []}}]}]}, OldVsn, OldVsn]), ?assertEqual(ok, 
rt:wait_until_nodes_ready(Nodes)), diff --git a/tests/verify_2i_returnterms.erl b/tests/verify_2i_returnterms.erl index 7a9f50ee4..2708a2de8 100644 --- a/tests/verify_2i_returnterms.erl +++ b/tests/verify_2i_returnterms.erl @@ -30,7 +30,7 @@ confirm() -> inets:start(), - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), diff --git a/tests/verify_2i_stream.erl b/tests/verify_2i_stream.erl index 32aff6939..638d68691 100644 --- a/tests/verify_2i_stream.erl +++ b/tests/verify_2i_stream.erl @@ -29,7 +29,7 @@ confirm() -> inets:start(), - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), diff --git a/tests/verify_2i_timeout.erl b/tests/verify_2i_timeout.erl index d913b5631..e17c09caf 100644 --- a/tests/verify_2i_timeout.erl +++ b/tests/verify_2i_timeout.erl @@ -29,7 +29,7 @@ confirm() -> inets:start(), Config = [{riak_kv, [{secondary_index_timeout, 1}]}], %% ludicrously short, should fail always - Nodes = rt:build_cluster([{current, Config}, {current, Config}, {current, Config}]), + Nodes = rt_cluster:build_cluster([{current, Config}, {current, Config}, {current, Config}]), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), PBPid = rt:pbc(hd(Nodes)), diff --git a/tests/verify_aae.erl b/tests/verify_aae.erl index 1d5fa2dda..97f665d63 100644 --- a/tests/verify_aae.erl +++ b/tests/verify_aae.erl @@ -64,7 +64,7 @@ -define(N_VAL, 3). confirm() -> - Nodes = rt:build_cluster(?NUM_NODES, ?CFG), + Nodes = rt_cluster:build_cluster(?NUM_NODES, ?CFG), verify_aae(Nodes), pass. @@ -242,12 +242,12 @@ test_less_than_n_mods(Node, KeyValues) -> wipe_out_partition(Node, Partition) -> lager:info("Wiping out partition ~p in node ~p", [Partition, Node]), - rt:clean_data_dir(Node, dir_for_partition(Partition)), + rt_cluster:clean_data_dir(Node, dir_for_partition(Partition)), ok. 
wipe_out_aae_data(Node, Partition) -> lager:info("Wiping out AAE data for partition ~p in node ~p", [Partition, Node]), - rt:clean_data_dir(Node, "anti_entropy/"++integer_to_list(Partition)), + rt_cluster:clean_data_dir(Node, "anti_entropy/"++integer_to_list(Partition)), ok. base_dir_for_backend(undefined) -> diff --git a/tests/verify_api_timeouts.erl b/tests/verify_api_timeouts.erl index 0a6851cfd..5cd22e08f 100644 --- a/tests/verify_api_timeouts.erl +++ b/tests/verify_api_timeouts.erl @@ -9,7 +9,7 @@ confirm() -> %% test requires allow_mult=false b/c of rt:systest_read - [Node] = rt:build_cluster(1), + [Node] = rt_cluster:build_cluster(1), rt:wait_until_pingable(Node), HC = rt:httpc(Node), diff --git a/tests/verify_asis_put.erl b/tests/verify_asis_put.erl index d34439c0f..f89bd8517 100644 --- a/tests/verify_asis_put.erl +++ b/tests/verify_asis_put.erl @@ -4,7 +4,7 @@ confirm() -> %% 1. Deploy two nodes - [Node1, Node2] = rt:deploy_nodes(2), + [Node1, Node2] = rt_cluster:deploy_nodes(2), %% 2. 
With PBC lager:info("Put new object in ~p via PBC.", [Node1]), PB1 = rt:pbc(Node1), diff --git a/tests/verify_backup_restore.erl b/tests/verify_backup_restore.erl index b587160ad..aec9a8bb3 100644 --- a/tests/verify_backup_restore.erl +++ b/tests/verify_backup_restore.erl @@ -39,7 +39,7 @@ confirm() -> lager:info("Building cluster of ~p nodes", [?NUM_NODES]), SpamDir = rt_config:config_or_os_env(spam_dir), Config = [{riak_search, [{enabled, true}]}], - [Node0 | _RestNodes] = Nodes = rt:build_cluster(?NUM_NODES, Config), + [Node0 | _RestNodes] = Nodes = rt_cluster:build_cluster(?NUM_NODES, Config), rt:enable_search_hook(Node0, ?SEARCH_BUCKET), rt:wait_until_ring_converged(Nodes), PbcPid = rt:pbc(Node0), @@ -135,9 +135,9 @@ confirm() -> lager:info("Wipe out entire cluster and start fresh"), riakc_pb_socket:stop(PbcPid), - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), lager:info("Rebuilding the cluster"), - rt:build_cluster(?NUM_NODES, Config), + rt_cluster:build_cluster(?NUM_NODES, Config), rt:enable_search_hook(Node0, ?SEARCH_BUCKET), rt:wait_until_ring_converged(Nodes), rt:wait_until_no_pending_changes(Nodes), diff --git a/tests/verify_basic_upgrade.erl b/tests/verify_basic_upgrade.erl index f02a7cc11..b1c17556a 100644 --- a/tests/verify_basic_upgrade.erl +++ b/tests/verify_basic_upgrade.erl @@ -26,7 +26,7 @@ confirm() -> TestMetaData = riak_test_runner:metadata(), OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - Nodes = [Node1|_] = rt:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), + Nodes = [Node1|_] = rt_cluster:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), lager:info("Writing 100 keys to ~p", [Node1]), rt:systest_write(Node1, 100, 3), diff --git a/tests/verify_bitcask_tombstone2_upgrade.erl b/tests/verify_bitcask_tombstone2_upgrade.erl index 72543e2c8..b6ebcf695 100644 --- a/tests/verify_bitcask_tombstone2_upgrade.erl +++ b/tests/verify_bitcask_tombstone2_upgrade.erl @@ -19,7 +19,7 @@ confirm() -> % Configure for fast 
merge checks Config = [{riak_kv, [{bitcask_merge_check_interval, 2000}]}, {bitcask, [{max_file_size, 100}]}], - Nodes = rt:build_cluster([{OldVsn, Config}]), + Nodes = rt_cluster:build_cluster([{OldVsn, Config}]), verify_bitcask_tombstone2_upgrade(Nodes), pass. diff --git a/tests/verify_busy_dist_port.erl b/tests/verify_busy_dist_port.erl index c8828d902..ac29da4a6 100644 --- a/tests/verify_busy_dist_port.erl +++ b/tests/verify_busy_dist_port.erl @@ -47,7 +47,7 @@ -include_lib("eunit/include/eunit.hrl"). confirm() -> - [Node1, Node2] = rt:build_cluster(2), + [Node1, Node2] = rt_cluster:build_cluster(2), lager:info("deployed 2 nodes"), rt:load_modules_on_nodes([cause_bdp, verify_bdp_event_handler, diff --git a/tests/verify_capabilities.erl b/tests/verify_capabilities.erl index 2730eca85..d689a0229 100644 --- a/tests/verify_capabilities.erl +++ b/tests/verify_capabilities.erl @@ -31,7 +31,7 @@ confirm() -> _ -> current end, - Nodes = rt:deploy_nodes([current, previous, Legacy]), + Nodes = rt_cluster:deploy_nodes([current, previous, Legacy]), [CNode, PNode, LNode] = Nodes, lager:info("Verifying known capabilities on a Current 1-node cluster"), diff --git a/tests/verify_commit_hooks.erl b/tests/verify_commit_hooks.erl index fdc8ca40a..b2d57636e 100644 --- a/tests/verify_commit_hooks.erl +++ b/tests/verify_commit_hooks.erl @@ -23,7 +23,7 @@ -export([confirm/0]). 
confirm() -> - [Node] = rt:deploy_nodes(1), + [Node] = rt_cluster:deploy_nodes(1), lager:info("Loading the hooks module into ~p", [Node]), rt:load_modules_on_nodes([hooks], [Node]), diff --git a/tests/verify_conditional_postcommit.erl b/tests/verify_conditional_postcommit.erl index fcb2c5f0c..cbf79b705 100644 --- a/tests/verify_conditional_postcommit.erl +++ b/tests/verify_conditional_postcommit.erl @@ -24,7 +24,7 @@ confirm() -> Config = [{riak_core, [{vnode_management_timer, 1000}, {ring_creation_size, 4}]}], - Nodes = rt:deploy_nodes(1, Config), + Nodes = rt_cluster:deploy_nodes(1, Config), Node = hd(Nodes), ok = rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/verify_counter_capability.erl b/tests/verify_counter_capability.erl index 59d2460de..5f8670a45 100644 --- a/tests/verify_counter_capability.erl +++ b/tests/verify_counter_capability.erl @@ -39,7 +39,7 @@ confirm() -> %% Upgrade nodes to previous %% Get put on all nodes Config = [], - [Legacy, Previous]=Nodes = rt:build_cluster([{legacy, Config}, {previous, Config}]), + [Legacy, Previous]=Nodes = rt_cluster:build_cluster([{legacy, Config}, {previous, Config}]), ?assertEqual(ok, rt:wait_until_capability(Previous, {riak_kv, crdt}, [])), verify_counter_converge:set_allow_mult_true(Nodes), diff --git a/tests/verify_counter_converge.erl b/tests/verify_counter_converge.erl index 2217d582b..adc067b40 100644 --- a/tests/verify_counter_converge.erl +++ b/tests/verify_counter_converge.erl @@ -33,7 +33,7 @@ confirm() -> Key = <<"a">>, - [N1, N2, N3, N4]=Nodes = rt:build_cluster(4), + [N1, N2, N3, N4]=Nodes = rt_cluster:build_cluster(4), [C1, C2, C3, C4]=Clients = [ rt:httpc(N) || N <- Nodes ], set_allow_mult_true(Nodes), diff --git a/tests/verify_counter_repl.erl b/tests/verify_counter_repl.erl index 5b6a1db49..b1a0689d2 100644 --- a/tests/verify_counter_repl.erl +++ b/tests/verify_counter_repl.erl @@ -63,7 +63,7 @@ make_clusters() -> Conf = [{riak_repl, [{fullsync_on_connect, false}, 
{fullsync_interval, disabled}]}, {riak_core, [{default_bucket_props, [{allow_mult, true}]}]}], - Nodes = rt:deploy_nodes(6, Conf), + Nodes = rt_cluster:deploy_nodes(6, Conf), {ClusterA, ClusterB} = lists:split(3, Nodes), A = make_cluster(ClusterA, "A"), B = make_cluster(ClusterB, "B"), diff --git a/tests/verify_crdt_capability.erl b/tests/verify_crdt_capability.erl index 238b65361..05204ae01 100644 --- a/tests/verify_crdt_capability.erl +++ b/tests/verify_crdt_capability.erl @@ -38,7 +38,7 @@ confirm() -> %% Upgrade nodes %% Get put on all nodes Config = [], - [Previous, Current]=Nodes = rt:build_cluster([{previous, Config}, {current, Config}]), + [Previous, Current]=Nodes = rt_cluster:build_cluster([{previous, Config}, {current, Config}]), ?assertEqual(ok, rt:wait_until_capability(Current, {riak_kv, crdt}, [pncounter])), verify_counter_converge:set_allow_mult_true(Nodes), diff --git a/tests/verify_cs_bucket.erl b/tests/verify_cs_bucket.erl index 5db40bbca..3ba21c902 100644 --- a/tests/verify_cs_bucket.erl +++ b/tests/verify_cs_bucket.erl @@ -29,7 +29,7 @@ -define(FOO, <<"foo">>). confirm() -> - Nodes = rt:build_cluster(3), + Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), PBPid = rt:pbc(hd(Nodes)), diff --git a/tests/verify_down.erl b/tests/verify_down.erl index 973fb9f10..efbf79d0d 100644 --- a/tests/verify_down.erl +++ b/tests/verify_down.erl @@ -23,7 +23,7 @@ -include_lib("eunit/include/eunit.hrl"). 
confirm() -> - Nodes = rt:deploy_nodes(3), + Nodes = rt_cluster:deploy_nodes(3), [Node1, Node2, Node3] = Nodes, %% Join node2 to node1 and wait for cluster convergence diff --git a/tests/verify_dt_context.erl b/tests/verify_dt_context.erl index acdd74e27..a9e44d12f 100644 --- a/tests/verify_dt_context.erl +++ b/tests/verify_dt_context.erl @@ -44,7 +44,7 @@ confirm() -> {riak_core, [ {ring_creation_size, 16}, {vnode_management_timer, 1000} ]}], - [N1, N2]=Nodes = rt:build_cluster(2, Config), + [N1, N2]=Nodes = rt_cluster:build_cluster(2, Config), create_bucket_types(Nodes, ?TYPES), diff --git a/tests/verify_dt_converge.erl b/tests/verify_dt_converge.erl index 32d23fe17..5abcbcc5a 100644 --- a/tests/verify_dt_converge.erl +++ b/tests/verify_dt_converge.erl @@ -49,7 +49,7 @@ confirm() -> {riak_core, [ {ring_creation_size, 16}, {vnode_management_timer, 1000} ]}], - [N1, N2, N3, N4]=Nodes = rt:build_cluster(4, Config), + [N1, N2, N3, N4]=Nodes = rt_cluster:build_cluster(4, Config), create_bucket_types(Nodes, ?TYPES), diff --git a/tests/verify_dt_upgrade.erl b/tests/verify_dt_upgrade.erl index 05006dff6..21690c2d6 100644 --- a/tests/verify_dt_upgrade.erl +++ b/tests/verify_dt_upgrade.erl @@ -31,7 +31,7 @@ confirm() -> TestMetaData = riak_test_runner:metadata(), OldVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - Nodes = [Node1|_] = rt:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), + Nodes = [Node1|_] = rt_cluster:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), verify_counter_converge:set_allow_mult_true(Nodes, ?COUNTER_BUCKET), populate_counters(Node1), diff --git a/tests/verify_dvv_repl.erl b/tests/verify_dvv_repl.erl index 86a873534..2f7f776ad 100644 --- a/tests/verify_dvv_repl.erl +++ b/tests/verify_dvv_repl.erl @@ -61,7 +61,7 @@ make_clusters() -> {riak_core, [{default_bucket_props, [{dvv_enabled, true}, {allow_mult, true}]}]}], - Nodes = rt:deploy_nodes(6, Conf), + Nodes = rt_cluster:deploy_nodes(6, Conf), {ClusterA, ClusterB} = 
lists:split(3, Nodes), A = make_cluster(ClusterA, "A"), B = make_cluster(ClusterB, "B"), diff --git a/tests/verify_dynamic_ring.erl b/tests/verify_dynamic_ring.erl index d1566a1d3..0e9b9d072 100644 --- a/tests/verify_dynamic_ring.erl +++ b/tests/verify_dynamic_ring.erl @@ -34,7 +34,7 @@ confirm() -> rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), rt_config:update_app_config(all, [{riak_core, [{ring_creation_size, ?START_SIZE}]}]), - [ANode, AnotherNode, YetAnother, _ReplacingNode] = _AllNodes = rt:deploy_nodes(4), + [ANode, AnotherNode, YetAnother, _ReplacingNode] = _AllNodes = rt_cluster:deploy_nodes(4), NewNodes = Nodes = [ANode, AnotherNode, YetAnother], %% This assignment for `NewNodes' is commented until riak_core %% issue #570 is resolved diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index cd7c5290e..3a457412c 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl @@ -126,11 +126,11 @@ assert_using(Node, {CapabilityCategory, CapabilityName}, ExpectedCapabilityName) %% For some testing purposes, making these limits smaller is helpful: deploy_test_nodes(false, N) -> - rt:deploy_nodes(N); + rt_cluster:deploy_nodes(N); deploy_test_nodes(true, N) -> lager:info("WARNING: Using turbo settings for testing."), Config = [{riak_core, [{forced_ownership_handoff, 8}, {handoff_concurrency, 8}, {vnode_inactivity_timeout, 1000}, {gossip_limit, {10000000, 60000}}]}], - rt:deploy_nodes(N, Config). + rt_cluster:deploy_nodes(N, Config). 
diff --git a/tests/verify_handoff_mixed.erl b/tests/verify_handoff_mixed.erl index c4a74414c..0370a49d0 100644 --- a/tests/verify_handoff_mixed.erl +++ b/tests/verify_handoff_mixed.erl @@ -58,7 +58,7 @@ confirm() -> Versions = [{current, SearchEnabled}, {UpgradeVsn, SearchEnabled}], Services = [riak_kv, riak_search, riak_pipe], - [Current, Old] = Nodes = rt:deploy_nodes(Versions, Services), + [Current, Old] = Nodes = rt_cluster:deploy_nodes(Versions, Services), prepare_vnodes(Current), diff --git a/tests/verify_kv_health_check.erl b/tests/verify_kv_health_check.erl index ebca92d1e..abde0d7aa 100644 --- a/tests/verify_kv_health_check.erl +++ b/tests/verify_kv_health_check.erl @@ -22,7 +22,7 @@ -export([confirm/0]). confirm() -> - [Node1, Node2, _Node3] = rt:build_cluster(3), + [Node1, Node2, _Node3] = rt_cluster:build_cluster(3), %% add intercept that delays handling of vnode commands %% on a single node (the "slow" node) diff --git a/tests/verify_link_walk_urls.erl b/tests/verify_link_walk_urls.erl index ff5fcb044..ba174b875 100644 --- a/tests/verify_link_walk_urls.erl +++ b/tests/verify_link_walk_urls.erl @@ -33,7 +33,7 @@ confirm() -> - [Node0 | _] = rt:build_cluster(?NUM_NODES), + [Node0 | _] = rt_cluster:build_cluster(?NUM_NODES), Pbc = rt:pbc(Node0), lager:info("Inserting linked graph"), diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index 33af44937..087d1b8dd 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ -29,7 +29,7 @@ -define(UNDEFINED_BUCKET_TYPE, <<"880bf69d-5dab-44ee-8762-d24c6f759ce1">>). 
confirm() -> - [Node1, Node2, Node3, Node4] = Nodes = rt:deploy_nodes(4), + [Node1, Node2, Node3, Node4] = Nodes = rt_cluster:deploy_nodes(4), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), lager:info("Nodes deployed, but not joined."), diff --git a/tests/verify_listkeys_eqcfsm.erl b/tests/verify_listkeys_eqcfsm.erl index d8f5f01ee..bb751fd6b 100644 --- a/tests/verify_listkeys_eqcfsm.erl +++ b/tests/verify_listkeys_eqcfsm.erl @@ -69,7 +69,7 @@ prop_test() -> [lager:info(" Command : ~p~n", [Cmd]) || Cmd <- Cmds], {H, _S, Res} = run_commands(?MODULE, Cmds, [{nodelist, Nodes}]), lager:info("======================== Ran commands"), - rt:clean_cluster(Nodes), + rt_cluster:clean_cluster(Nodes), aggregate(zip(state_names(H),command_names(Cmds)), equals(Res, ok)) end))). @@ -178,7 +178,7 @@ log_transition(S) -> %% Helpers %% ==================================================================== setup_cluster(NumNodes) -> - Nodes = rt:build_cluster(NumNodes), + Nodes = rt_cluster:build_cluster(NumNodes), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), ?assertEqual(ok, rt:wait_until_transfers_complete(Nodes)), Node = hd(Nodes), diff --git a/tests/verify_membackend.erl b/tests/verify_membackend.erl index 536ef1f56..e61f57990 100644 --- a/tests/verify_membackend.erl +++ b/tests/verify_membackend.erl @@ -35,16 +35,16 @@ confirm() -> ttl(Mode) -> Conf = mkconf(ttl, Mode), - [NodeA, NodeB] = rt:deploy_nodes(2, Conf), + [NodeA, NodeB] = rt_cluster:deploy_nodes(2, Conf), ?assertEqual(ok, check_leave_and_expiry(NodeA, NodeB)), - rt:clean_cluster([NodeA]), + rt_cluster:clean_cluster([NodeA]), ok. max_memory(Mode) -> Conf = mkconf(max_memory, Mode), - [NodeA, NodeB] = rt:deploy_nodes(2, Conf), + [NodeA, NodeB] = rt_cluster:deploy_nodes(2, Conf), rt:join(NodeB, NodeA), @@ -54,14 +54,14 @@ max_memory(Mode) -> ?assertEqual(ok, check_eviction(NodeA)), - rt:clean_cluster([NodeA, NodeB]), + rt_cluster:clean_cluster([NodeA, NodeB]), ok. 
combo(Mode) -> Conf = mkconf(combo, Mode), - [NodeA, NodeB] = rt:deploy_nodes(2, Conf), + [NodeA, NodeB] = rt_cluster:deploy_nodes(2, Conf), ?assertEqual(ok, check_leave_and_expiry(NodeA, NodeB)), @@ -75,7 +75,7 @@ combo(Mode) -> ?assertEqual(ok, check_eviction(NodeA)), - rt:clean_cluster([NodeA]), + rt_cluster:clean_cluster([NodeA]), ok. diff --git a/tests/verify_mr_prereduce_node_down.erl b/tests/verify_mr_prereduce_node_down.erl index 265dd7a29..e6bc2d914 100644 --- a/tests/verify_mr_prereduce_node_down.erl +++ b/tests/verify_mr_prereduce_node_down.erl @@ -44,7 +44,7 @@ confirm() -> NodeCount = 4, lager:info("Build ~b-node cluster", [NodeCount]), - [Primary,ToKill|_] = rt:build_cluster(NodeCount), + [Primary,ToKill|_] = rt_cluster:build_cluster(NodeCount), %% We need one node down for this test rt:stop(ToKill), diff --git a/tests/verify_no_writes_on_read.erl b/tests/verify_no_writes_on_read.erl index ca4e95d31..5b48ed048 100644 --- a/tests/verify_no_writes_on_read.erl +++ b/tests/verify_no_writes_on_read.erl @@ -11,7 +11,7 @@ confirm() -> Backend = proplists:get_value(backend, riak_test_runner:metadata()), lager:info("Running with backend ~p", [Backend]), ?assertEqual(bitcask, Backend), - [Node1 | _Rest] = _Nodes = rt:build_cluster(?NUM_NODES), + [Node1 | _Rest] = _Nodes = rt_cluster:build_cluster(?NUM_NODES), PBC = rt:pbc(Node1), lager:info("Setting last write wins on bucket"), B = ?BUCKET, diff --git a/tests/verify_object_limits.erl b/tests/verify_object_limits.erl index fd8af35dc..a989eed1d 100644 --- a/tests/verify_object_limits.erl +++ b/tests/verify_object_limits.erl @@ -34,7 +34,7 @@ confirm() -> - [Node1] = rt:build_cluster(1, [{riak_kv, [ + [Node1] = rt_cluster:build_cluster(1, [{riak_kv, [ {ring_creation_size, 8}, {max_object_size, ?MAX_SIZE}, {warn_object_size, ?WARN_SIZE}, diff --git a/tests/verify_reset_bucket_props.erl b/tests/verify_reset_bucket_props.erl index 1c099a81c..c3740bc94 100644 --- a/tests/verify_reset_bucket_props.erl +++ 
b/tests/verify_reset_bucket_props.erl @@ -29,7 +29,7 @@ confirm() -> %% we will be using two of the nodes to perform an %% update and then a reset (one on each node) of a bucket's properties. %% All nodes are checked to make sure the reset is affected on them - [Node1, Node2, Node3] = Nodes = rt:build_cluster(3), + [Node1, Node2, Node3] = Nodes = rt_cluster:build_cluster(3), DefaultProps = get_current_bucket_props(Nodes, ?BUCKET), diff --git a/tests/verify_riak_lager.erl b/tests/verify_riak_lager.erl index 98505538b..b902048d6 100644 --- a/tests/verify_riak_lager.erl +++ b/tests/verify_riak_lager.erl @@ -29,7 +29,7 @@ confirm() -> lager:info("Staring a node"), - Nodes = [Node] = rt:deploy_nodes(1), + Nodes = [Node] = rt_cluster:deploy_nodes(1), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), lager:info("Stopping that node"), diff --git a/tests/verify_riak_object_reformat.erl b/tests/verify_riak_object_reformat.erl index cd693f4ff..bdb9a9c6d 100644 --- a/tests/verify_riak_object_reformat.erl +++ b/tests/verify_riak_object_reformat.erl @@ -34,7 +34,7 @@ confirm() -> rt_config:update_app_config(all, [{riak_kv, [{object_format, v1}]}]), TestMetaData = riak_test_runner:metadata(), DowngradeVsn = proplists:get_value(upgrade_version, TestMetaData, previous), - Nodes = [Node1|_] = rt:build_cluster(?N), + Nodes = [Node1|_] = rt_cluster:build_cluster(?N), [rt:wait_until_capability(N, {riak_kv, object_format}, v1, v0) || N <- Nodes], diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index 10568b91a..87e7d224c 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -24,7 +24,7 @@ %% You should have curl installed locally to do this. 
confirm() -> - Nodes = rt:deploy_nodes(1), + Nodes = rt_cluster:deploy_nodes(1), [Node1] = Nodes, ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), Stats1 = get_stats(Node1), diff --git a/tests/verify_search.erl b/tests/verify_search.erl index 2d98ebb32..0444180ab 100644 --- a/tests/verify_search.erl +++ b/tests/verify_search.erl @@ -31,7 +31,7 @@ confirm() -> Config = [{riak_search, [{enabled, true}]}], - [Node0 | _RestNodes] = Nodes = rt:build_cluster(3, Config), + [Node0 | _RestNodes] = Nodes = rt_cluster:build_cluster(3, Config), rt:wait_until_ring_converged(Nodes), Path = rt_config:get(rt_scratch_dir), diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index 1defc4ec0..a1455deb1 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -24,7 +24,7 @@ -include_lib("riakc/include/riakc.hrl"). confirm() -> - [Node] = rt:build_cluster([legacy]), + [Node] = rt_cluster:build_cluster([legacy]), rt:wait_until_nodes_ready([Node]), check_fixed_index_statuses(Node, undefined), diff --git a/tests/verify_snmp.erl b/tests/verify_snmp.erl index 32048f0fb..721969868 100644 --- a/tests/verify_snmp.erl +++ b/tests/verify_snmp.erl @@ -27,7 +27,7 @@ confirm() -> %% Bring up a small cluster - [Node1] = rt:deploy_nodes(1), + [Node1] = rt_cluster:deploy_nodes(1), ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), Keys = [{vnodeGets,<<"vnode_gets">>}, diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl index 0370d0430..6032f0bdf 100644 --- a/tests/verify_staged_clustering.erl +++ b/tests/verify_staged_clustering.erl @@ -24,7 +24,7 @@ -include_lib("eunit/include/eunit.hrl"). 
confirm() -> - Nodes = rt:deploy_nodes(4), + Nodes = rt_cluster:deploy_nodes(4), [Node1, Node2, Node3, Node4] = Nodes, Nodes123 = [Node1, Node2, Node3], Nodes23 = [Node2, Node3], diff --git a/tests/verify_tick_change.erl b/tests/verify_tick_change.erl index 208adcdd4..cf8c12838 100644 --- a/tests/verify_tick_change.erl +++ b/tests/verify_tick_change.erl @@ -27,7 +27,7 @@ confirm() -> ClusterSize = 4, rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NewConfig = [], - Nodes = rt:build_cluster(ClusterSize, NewConfig), + Nodes = rt_cluster:build_cluster(ClusterSize, NewConfig), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), [Node1|_] = Nodes, Bucket = <<"systest">>, diff --git a/tests/verify_vclock.erl b/tests/verify_vclock.erl index dcd24c690..5a5cbaa50 100644 --- a/tests/verify_vclock.erl +++ b/tests/verify_vclock.erl @@ -193,12 +193,12 @@ our_pbc_read(Node, Start, End, Bucket, VSuffix) -> %% For some testing purposes, making these limits smaller is helpful: deploy_test_nodes(false, N) -> - rt:deploy_nodes(N); + rt_cluster:deploy_nodes(N); deploy_test_nodes(true, N) -> lager:info("NOTICE: Using turbo settings for testing."), Config = [{riak_core, [{forced_ownership_handoff, 8}, {handoff_concurrency, 8}, {vnode_inactivity_timeout, 1000}, {gossip_limit, {10000000, 60000}}]}], - rt:deploy_nodes(N, Config). + rt_cluster:deploy_nodes(N, Config). 
diff --git a/tests/verify_vclock_encoding_upgrade.erl b/tests/verify_vclock_encoding_upgrade.erl index e8614e83b..110951099 100644 --- a/tests/verify_vclock_encoding_upgrade.erl +++ b/tests/verify_vclock_encoding_upgrade.erl @@ -23,7 +23,7 @@ confirm() -> lager:info("Deploying previous cluster"), - [Prev, Current] = rt:build_cluster([previous, current]), + [Prev, Current] = rt_cluster:build_cluster([previous, current]), PrevClient = rt:pbc(Prev), CurrentClient = rt:pbc(Current), K = <<"key">>, diff --git a/tests/yz_ensemble.erl b/tests/yz_ensemble.erl index e62c62769..320b3949a 100644 --- a/tests/yz_ensemble.erl +++ b/tests/yz_ensemble.erl @@ -89,10 +89,10 @@ verify_ensemble_delete_support(Node, Bucket, Index) -> %% node when adding yokozuna and ensemble support. Waiting for yokozuna %% to load on each node allows join_cluster to complete consistently build_cluster_with_yz_support(Num, Config, NVal) -> - Nodes = rt:deploy_nodes(Num, Config), + Nodes = rt_cluster:deploy_nodes(Num, Config), [rt:wait_for_cluster_service([N], yokozuna) || N <- Nodes], Node = hd(Nodes), - rt:join_cluster(Nodes), + rt_cluster:join_cluster(Nodes), ensemble_util:wait_until_cluster(Nodes), ensemble_util:wait_for_membership(Node), ensemble_util:wait_until_stable(Node, NVal), From 8ef3501f50ad30da8592b8fc55a24cab95d9e547 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 13:23:11 -0400 Subject: [PATCH 04/17] Move backend related functions from rt module to new rt_backend module. 
--- src/riak_test_runner.erl | 3 +- src/rt.erl | 70 ---------------------- src/rt_backend.erl | 95 ++++++++++++++++++++++++++++++ src/rtdev.erl | 2 +- src/rtssh.erl | 2 +- tests/mapred_notfound_failover.erl | 2 +- tests/verify_membackend.erl | 4 +- 7 files changed, 102 insertions(+), 76 deletions(-) create mode 100644 src/rt_backend.erl diff --git a/src/riak_test_runner.erl b/src/riak_test_runner.erl index 62da93043..a1a263c9c 100644 --- a/src/riak_test_runner.erl +++ b/src/riak_test_runner.erl @@ -51,7 +51,8 @@ run(TestModule, Outdir, TestMetaData, HarnessArgs) -> undefined -> []; Value -> [{multi_config, Value}] end, - Backend = rt:set_backend(proplists:get_value(backend, TestMetaData), BackendExtras), + Backend = rt_backend:set_backend( + proplists:get_value(backend, TestMetaData), BackendExtras), {PropsMod, PropsFun} = function_name(properties, TestModule, 0, rt_cluster), {SetupMod, SetupFun} = function_name(setup, TestModule, 2, rt_cluster), {ConfirmMod, ConfirmFun} = function_name(confirm, TestModule), diff --git a/src/rt.erl b/src/rt.erl index 378b6bbe4..f309283d4 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -89,8 +89,6 @@ riak/2, riak_repl/2, rpc_get_env/2, - set_backend/1, - set_backend/2, setup_harness/2, setup_log_capture/1, slow_upgrade/3, @@ -1356,74 +1354,6 @@ enable_search_hook(Node, Bucket) when is_binary(Bucket) -> lager:info("Installing search hook for bucket ~p", [Bucket]), ?assertEqual(ok, rpc:call(Node, riak_search_kv_hook, install, [Bucket])). -%%%=================================================================== -%%% Test harness setup, configuration, and internal utilities -%%%=================================================================== - -%% @doc Sets the backend of ALL nodes that could be available to riak_test. -%% this is not limited to the nodes under test, but any node that -%% riak_test is able to find. It then queries each available node -%% for it's backend, and returns it if they're all equal. 
If different -%% nodes have different backends, it returns a list of backends. -%% Currently, there is no way to request multiple backends, so the -%% list return type should be considered an error. --spec set_backend(atom()) -> atom()|[atom()]. -set_backend(Backend) -> - set_backend(Backend, []). - --spec set_backend(atom(), [{atom(), term()}]) -> atom()|[atom()]. -set_backend(bitcask, _) -> - set_backend(riak_kv_bitcask_backend); -set_backend(eleveldb, _) -> - set_backend(riak_kv_eleveldb_backend); -set_backend(memory, _) -> - set_backend(riak_kv_memory_backend); -set_backend(multi, Extras) -> - set_backend(riak_kv_multi_backend, Extras); -set_backend(Backend, _) when Backend == riak_kv_bitcask_backend; Backend == riak_kv_eleveldb_backend; Backend == riak_kv_memory_backend -> - lager:info("rt:set_backend(~p)", [Backend]), - rt_config:update_app_config(all, [{riak_kv, [{storage_backend, Backend}]}]), - get_backends(); -set_backend(Backend, Extras) when Backend == riak_kv_multi_backend -> - MultiConfig = proplists:get_value(multi_config, Extras, default), - Config = make_multi_backend_config(MultiConfig), - rt_config:update_app_config(all, [{riak_kv, Config}]), - get_backends(); -set_backend(Other, _) -> - lager:warning("rt:set_backend doesn't recognize ~p as a legit backend, using the default.", [Other]), - get_backends(). 
- -make_multi_backend_config(default) -> - [{storage_backend, riak_kv_multi_backend}, - {multi_backend_default, <<"eleveldb1">>}, - {multi_backend, [{<<"eleveldb1">>, riak_kv_eleveldb_backend, []}, - {<<"memory1">>, riak_kv_memory_backend, []}, - {<<"bitcask1">>, riak_kv_bitcask_backend, []}]}]; -make_multi_backend_config(indexmix) -> - [{storage_backend, riak_kv_multi_backend}, - {multi_backend_default, <<"eleveldb1">>}, - {multi_backend, [{<<"eleveldb1">>, riak_kv_eleveldb_backend, []}, - {<<"memory1">>, riak_kv_memory_backend, []}]}]; -make_multi_backend_config(Other) -> - lager:warning("rt:set_multi_backend doesn't recognize ~p as legit multi-backend config, using default", [Other]), - make_multi_backend_config(default). - -get_backends() -> - Backends = ?HARNESS:get_backends(), - case Backends of - [riak_kv_bitcask_backend] -> bitcask; - [riak_kv_eleveldb_backend] -> eleveldb; - [riak_kv_memory_backend] -> memory; - [Other] -> Other; - MoreThanOne -> MoreThanOne - end. - --spec get_backend([proplists:property()]) -> atom() | error. -get_backend(AppConfigProplist) -> - case kvc:path('riak_kv.storage_backend', AppConfigProplist) of - [] -> error; - Backend -> Backend - end. %% @doc Gets the current version under test. In the case of an upgrade test %% or something like that, it's the version you're upgrading to. diff --git a/src/rt_backend.erl b/src/rt_backend.erl new file mode 100644 index 000000000..59f42739d --- /dev/null +++ b/src/rt_backend.erl @@ -0,0 +1,95 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2013-2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. 
You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_backend). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-compile(export_all). + +-define(HARNESS, (rt_config:get(rt_harness))). + +%%%=================================================================== +%%% Test harness setup, configuration, and internal utilities +%%%=================================================================== + +%% @doc Sets the backend of ALL nodes that could be available to riak_test. +%% this is not limited to the nodes under test, but any node that +%% riak_test is able to find. It then queries each available node +%% for it's backend, and returns it if they're all equal. If different +%% nodes have different backends, it returns a list of backends. +%% Currently, there is no way to request multiple backends, so the +%% list return type should be considered an error. +-spec set_backend(atom()) -> atom()|[atom()]. +set_backend(Backend) -> + set_backend(Backend, []). + +-spec set_backend(atom(), [{atom(), term()}]) -> atom()|[atom()]. 
+set_backend(bitcask, _) -> + set_backend(riak_kv_bitcask_backend); +set_backend(eleveldb, _) -> + set_backend(riak_kv_eleveldb_backend); +set_backend(memory, _) -> + set_backend(riak_kv_memory_backend); +set_backend(multi, Extras) -> + set_backend(riak_kv_multi_backend, Extras); +set_backend(Backend, _) when Backend == riak_kv_bitcask_backend; Backend == riak_kv_eleveldb_backend; Backend == riak_kv_memory_backend -> + lager:info("rt_backend:set_backend(~p)", [Backend]), + rt_config:update_app_config(all, [{riak_kv, [{storage_backend, Backend}]}]), + get_backends(); +set_backend(Backend, Extras) when Backend == riak_kv_multi_backend -> + MultiConfig = proplists:get_value(multi_config, Extras, default), + Config = make_multi_backend_config(MultiConfig), + rt_config:update_app_config(all, [{riak_kv, Config}]), + get_backends(); +set_backend(Other, _) -> + lager:warning("rt_backend:set_backend doesn't recognize ~p as a legit backend, using the default.", [Other]), + get_backends(). + +make_multi_backend_config(default) -> + [{storage_backend, riak_kv_multi_backend}, + {multi_backend_default, <<"eleveldb1">>}, + {multi_backend, [{<<"eleveldb1">>, riak_kv_eleveldb_backend, []}, + {<<"memory1">>, riak_kv_memory_backend, []}, + {<<"bitcask1">>, riak_kv_bitcask_backend, []}]}]; +make_multi_backend_config(indexmix) -> + [{storage_backend, riak_kv_multi_backend}, + {multi_backend_default, <<"eleveldb1">>}, + {multi_backend, [{<<"eleveldb1">>, riak_kv_eleveldb_backend, []}, + {<<"memory1">>, riak_kv_memory_backend, []}]}]; +make_multi_backend_config(Other) -> + lager:warning("rt:set_multi_backend doesn't recognize ~p as legit multi-backend config, using default", [Other]), + make_multi_backend_config(default). + +get_backends() -> + Backends = ?HARNESS:get_backends(), + case Backends of + [riak_kv_bitcask_backend] -> bitcask; + [riak_kv_eleveldb_backend] -> eleveldb; + [riak_kv_memory_backend] -> memory; + [Other] -> Other; + MoreThanOne -> MoreThanOne + end. 
+ +-spec get_backend([proplists:property()]) -> atom() | error. +get_backend(AppConfigProplist) -> + case kvc:path('riak_kv.storage_backend', AppConfigProplist) of + [] -> error; + Backend -> Backend + end. diff --git a/src/rtdev.erl b/src/rtdev.erl index 8974ceebf..d3d9bbc09 100644 --- a/src/rtdev.erl +++ b/src/rtdev.erl @@ -305,7 +305,7 @@ get_backend(AppConfig) -> case file:consult(ConfigFile) of {ok, [Config]} -> - rt:get_backend(Config); + rt_backend:get_backend(Config); E -> lager:error("Error reading ~s, ~p", [ConfigFile, E]), error diff --git a/src/rtssh.erl b/src/rtssh.erl index 081fb2b13..1ee477c65 100644 --- a/src/rtssh.erl +++ b/src/rtssh.erl @@ -88,7 +88,7 @@ get_backend(Host, AppConfig) -> Str = binary_to_list(Bin), {ok, ErlTok, _} = erl_scan:string(Str), {ok, Term} = erl_parse:parse_term(ErlTok), - rt:get_backend(Term). + rt_backend:get_backend(Term). cmd(Cmd) -> cmd(Cmd, []). diff --git a/tests/mapred_notfound_failover.erl b/tests/mapred_notfound_failover.erl index 9076a1c30..488807f01 100644 --- a/tests/mapred_notfound_failover.erl +++ b/tests/mapred_notfound_failover.erl @@ -39,7 +39,7 @@ confirm() -> %% we need the volatility of memory, so we can cause a replica %% notfound by killing a vnode - rt:set_backend(memory), + rt_backend:set_backend(memory), Nodes = rt_cluster:build_cluster(3), diff --git a/tests/verify_membackend.erl b/tests/verify_membackend.erl index e61f57990..2f05cec2c 100644 --- a/tests/verify_membackend.erl +++ b/tests/verify_membackend.erl @@ -214,7 +214,7 @@ mkconf(Test, Mode) -> case Mode of regular -> %% only memory supports TTL - rt:set_backend(memory), + rt_backend:set_backend(memory), [ {riak_core, [ @@ -227,7 +227,7 @@ mkconf(Test, Mode) -> ]} ]; multi -> - rt:set_backend(multi), + rt_backend:set_backend(multi), [ {riak_core, [ {ring_creation_size, 4} From 488b18c66fbbb9a0caf00b94a73644c625c0baae Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 14:00:43 -0400 Subject: [PATCH 05/17] Move protobuf-related 
functions from rt module to rt_pb. --- src/rt.erl | 144 ----------------- src/rt_pb.erl | 164 ++++++++++++++++++++ tests/bucket_props_roundtrip.erl | 2 +- tests/ensemble_basic3.erl | 10 +- tests/ensemble_basic4.erl | 6 +- tests/ensemble_interleave.erl | 8 +- tests/ensemble_remove_node2.erl | 8 +- tests/ensemble_sync.erl | 10 +- tests/ensemble_vnode_crash.erl | 10 +- tests/jmx_verify.erl | 6 +- tests/loaded_upgrade.erl | 6 +- tests/loaded_upgrade_worker_sup.erl | 4 +- tests/mapred_basic_compat.erl | 2 +- tests/mapred_javascript.erl | 2 +- tests/mapred_notfound_failover.erl | 2 +- tests/mapred_search_switch.erl | 2 +- tests/overload.erl | 2 +- tests/partition_repair.erl | 4 +- tests/replication/repl_aae_fullsync.erl | 4 +- tests/replication/repl_bucket_types.erl | 4 +- tests/replication/repl_reduced.erl | 18 +-- tests/replication/replication2.erl | 2 +- tests/replication/replication2_pg.erl | 84 +++++----- tests/replication/rt_cascading.erl | 46 +++--- tests/secondary_index_tests.erl | 4 +- tests/sibling_explosion.erl | 2 +- tests/verify_2i_aae.erl | 4 +- tests/verify_2i_limit.erl | 4 +- tests/verify_2i_mixed_cluster.erl | 6 +- tests/verify_2i_returnterms.erl | 2 +- tests/verify_2i_stream.erl | 2 +- tests/verify_2i_timeout.erl | 2 +- tests/verify_aae.erl | 4 +- tests/verify_api_timeouts.erl | 8 +- tests/verify_asis_put.erl | 4 +- tests/verify_backup_restore.erl | 12 +- tests/verify_bitcask_tombstone2_upgrade.erl | 2 +- tests/verify_conditional_postcommit.erl | 4 +- tests/verify_corruption_filtering.erl | 4 +- tests/verify_counter_capability.erl | 4 +- tests/verify_crdt_capability.erl | 2 +- tests/verify_cs_bucket.erl | 2 +- tests/verify_dt_context.erl | 2 +- tests/verify_dt_converge.erl | 2 +- tests/verify_dt_upgrade.erl | 4 +- tests/verify_dvv_repl.erl | 2 +- tests/verify_handoff_mixed.erl | 4 +- tests/verify_kv_health_check.erl | 2 +- tests/verify_link_walk_urls.erl | 2 +- tests/verify_listkeys.erl | 12 +- tests/verify_listkeys_eqcfsm.erl | 2 +- 
tests/verify_mr_prereduce_node_down.erl | 2 +- tests/verify_no_writes_on_read.erl | 2 +- tests/verify_object_limits.erl | 2 +- tests/verify_reset_bucket_props.erl | 2 +- tests/verify_riak_stats.erl | 6 +- tests/verify_secondary_index_reformat.erl | 4 +- tests/verify_vclock.erl | 6 +- tests/verify_vclock_encoding_upgrade.erl | 4 +- tests/yz_ensemble.erl | 4 +- 60 files changed, 353 insertions(+), 333 deletions(-) create mode 100644 src/rt_pb.erl diff --git a/src/rt.erl b/src/rt.erl index f309283d4..7df35c6d9 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -72,16 +72,6 @@ owners_according_to/1, partition/2, partitions_for_node/1, - pbc/1, - pbc_read/3, - pbc_read/4, - pbc_read_check/4, - pbc_read_check/5, - pbc_set_bucket_prop/3, - pbc_write/4, - pbc_put_dir/3, - pbc_put_file/4, - pbc_really_deleted/3, pmap/2, post_result/2, priv_dir/0, @@ -1120,144 +1110,10 @@ get_replica(Node, Bucket, Key, I, N) -> ?assert(false) end. -%%%=================================================================== - -%% @doc PBC-based version of {@link systest_write/1} -pbc_systest_write(Node, Size) -> - pbc_systest_write(Node, Size, 2). - -pbc_systest_write(Node, Size, W) -> - pbc_systest_write(Node, 1, Size, <<"systest">>, W). - -pbc_systest_write(Node, Start, End, Bucket, W) -> - rt:wait_for_service(Node, riak_kv), - Pid = pbc(Node), - F = fun(N, Acc) -> - Obj = riakc_obj:new(Bucket, <>, <>), - try riakc_pb_socket:put(Pid, Obj, W) of - ok -> - Acc; - Other -> - [{N, Other} | Acc] - catch - What:Why -> - [{N, {What, Why}} | Acc] - end - end, - lists:foldl(F, [], lists:seq(Start, End)). - -pbc_systest_read(Node, Size) -> - pbc_systest_read(Node, Size, 2). - -pbc_systest_read(Node, Size, R) -> - pbc_systest_read(Node, 1, Size, <<"systest">>, R). 
- -pbc_systest_read(Node, Start, End, Bucket, R) -> - rt:wait_for_service(Node, riak_kv), - Pid = pbc(Node), - F = fun(N, Acc) -> - case riakc_pb_socket:get(Pid, Bucket, <>, R) of - {ok, Obj} -> - case riakc_obj:get_value(Obj) of - <> -> - Acc; - WrongVal -> - [{N, {wrong_val, WrongVal}} | Acc] - end; - Other -> - [{N, Other} | Acc] - end - end, - lists:foldl(F, [], lists:seq(Start, End)). - %%%=================================================================== %%% PBC & HTTPC Functions %%%=================================================================== -%% @doc get me a protobuf client process and hold the mayo! --spec pbc(node()) -> pid(). -pbc(Node) -> - rt:wait_for_service(Node, riak_kv), - ConnInfo = proplists:get_value(Node, connection_info([Node])), - {IP, PBPort} = proplists:get_value(pb, ConnInfo), - {ok, Pid} = riakc_pb_socket:start_link(IP, PBPort, [{auto_reconnect, true}]), - Pid. - -%% @doc does a read via the erlang protobuf client --spec pbc_read(pid(), binary(), binary()) -> binary(). -pbc_read(Pid, Bucket, Key) -> - pbc_read(Pid, Bucket, Key, []). - --spec pbc_read(pid(), binary(), binary(), [any()]) -> binary(). -pbc_read(Pid, Bucket, Key, Options) -> - {ok, Value} = riakc_pb_socket:get(Pid, Bucket, Key, Options), - Value. - --spec pbc_read_check(pid(), binary(), binary(), [any()]) -> boolean(). -pbc_read_check(Pid, Bucket, Key, Allowed) -> - pbc_read_check(Pid, Bucket, Key, Allowed, []). - --spec pbc_read_check(pid(), binary(), binary(), [any()], [any()]) -> boolean(). -pbc_read_check(Pid, Bucket, Key, Allowed, Options) -> - case riakc_pb_socket:get(Pid, Bucket, Key, Options) of - {ok, _} -> - true = lists:member(ok, Allowed); - Other -> - lists:member(Other, Allowed) orelse throw({failed, Other, Allowed}) - end. - -%% @doc does a write via the erlang protobuf client --spec pbc_write(pid(), binary(), binary(), binary()) -> atom(). 
-pbc_write(Pid, Bucket, Key, Value) -> - Object = riakc_obj:new(Bucket, Key, Value), - riakc_pb_socket:put(Pid, Object). - -%% @doc does a write via the erlang protobuf client plus content-type --spec pbc_write(pid(), binary(), binary(), binary(), list()) -> atom(). -pbc_write(Pid, Bucket, Key, Value, CT) -> - Object = riakc_obj:new(Bucket, Key, Value, CT), - riakc_pb_socket:put(Pid, Object). - -%% @doc sets a bucket property/properties via the erlang protobuf client --spec pbc_set_bucket_prop(pid(), binary(), [proplists:property()]) -> atom(). -pbc_set_bucket_prop(Pid, Bucket, PropList) -> - riakc_pb_socket:set_bucket(Pid, Bucket, PropList). - -%% @doc Puts the contents of the given file into the given bucket using the -%% filename as a key and assuming a plain text content type. -pbc_put_file(Pid, Bucket, Key, Filename) -> - {ok, Contents} = file:read_file(Filename), - riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Contents, "text/plain")). - -%% @doc Puts all files in the given directory into the given bucket using the -%% filename as a key and assuming a plain text content type. -pbc_put_dir(Pid, Bucket, Dir) -> - lager:info("Putting files from dir ~p into bucket ~p", [Dir, Bucket]), - {ok, Files} = file:list_dir(Dir), - [pbc_put_file(Pid, Bucket, list_to_binary(F), filename:join([Dir, F])) - || F <- Files]. - -%% @doc True if the given keys have been really, really deleted. -%% Useful when you care about the keys not being there. Delete simply writes -%% tombstones under the given keys, so those are still seen by key folding -%% operations. -pbc_really_deleted(Pid, Bucket, Keys) -> - StillThere = - fun(K) -> - Res = riakc_pb_socket:get(Pid, Bucket, K, - [{r, 1}, - {notfound_ok, false}, - {basic_quorum, false}, - deletedvclock]), - case Res of - {error, notfound} -> - false; - _ -> - %% Tombstone still around - true - end - end, - [] == lists:filter(StillThere, Keys). 
%% @doc Returns HTTPS URL information for a list of Nodes https_url(Nodes) when is_list(Nodes) -> diff --git a/src/rt_pb.erl b/src/rt_pb.erl new file mode 100644 index 000000000..959f7a815 --- /dev/null +++ b/src/rt_pb.erl @@ -0,0 +1,164 @@ +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_pb). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-compile(export_all). +-export([pbc/1, + pbc_read/3, + pbc_read/4, + pbc_read_check/4, + pbc_read_check/5, + pbc_set_bucket_prop/3, + pbc_write/4, + pbc_put_dir/3, + pbc_put_file/4, + pbc_really_deleted/3]). + +-define(HARNESS, (rt_config:get(rt_harness))). + +%% @doc get me a protobuf client process and hold the mayo! +-spec pbc(node()) -> pid(). +pbc(Node) -> + rt:wait_for_service(Node, riak_kv), + ConnInfo = proplists:get_value(Node, rt:connection_info([Node])), + {IP, PBPort} = proplists:get_value(pb, ConnInfo), + {ok, Pid} = riakc_pb_socket:start_link(IP, PBPort, [{auto_reconnect, true}]), + Pid. + +%% @doc does a read via the erlang protobuf client +-spec pbc_read(pid(), binary(), binary()) -> binary(). +pbc_read(Pid, Bucket, Key) -> + pbc_read(Pid, Bucket, Key, []). + +-spec pbc_read(pid(), binary(), binary(), [any()]) -> binary(). +pbc_read(Pid, Bucket, Key, Options) -> + {ok, Value} = riakc_pb_socket:get(Pid, Bucket, Key, Options), + Value. + +-spec pbc_read_check(pid(), binary(), binary(), [any()]) -> boolean(). 
+pbc_read_check(Pid, Bucket, Key, Allowed) -> + pbc_read_check(Pid, Bucket, Key, Allowed, []). + +-spec pbc_read_check(pid(), binary(), binary(), [any()], [any()]) -> boolean(). +pbc_read_check(Pid, Bucket, Key, Allowed, Options) -> + case riakc_pb_socket:get(Pid, Bucket, Key, Options) of + {ok, _} -> + true = lists:member(ok, Allowed); + Other -> + lists:member(Other, Allowed) orelse throw({failed, Other, Allowed}) + end. + +%% @doc does a write via the erlang protobuf client +-spec pbc_write(pid(), binary(), binary(), binary()) -> atom(). +pbc_write(Pid, Bucket, Key, Value) -> + Object = riakc_obj:new(Bucket, Key, Value), + riakc_pb_socket:put(Pid, Object). + +%% @doc does a write via the erlang protobuf client plus content-type +-spec pbc_write(pid(), binary(), binary(), binary(), list()) -> atom(). +pbc_write(Pid, Bucket, Key, Value, CT) -> + Object = riakc_obj:new(Bucket, Key, Value, CT), + riakc_pb_socket:put(Pid, Object). + +%% @doc sets a bucket property/properties via the erlang protobuf client +-spec pbc_set_bucket_prop(pid(), binary(), [proplists:property()]) -> atom(). +pbc_set_bucket_prop(Pid, Bucket, PropList) -> + riakc_pb_socket:set_bucket(Pid, Bucket, PropList). + +%% @doc Puts the contents of the given file into the given bucket using the +%% filename as a key and assuming a plain text content type. +pbc_put_file(Pid, Bucket, Key, Filename) -> + {ok, Contents} = file:read_file(Filename), + riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Contents, "text/plain")). + +%% @doc Puts all files in the given directory into the given bucket using the +%% filename as a key and assuming a plain text content type. +pbc_put_dir(Pid, Bucket, Dir) -> + lager:info("Putting files from dir ~p into bucket ~p", [Dir, Bucket]), + {ok, Files} = file:list_dir(Dir), + [pbc_put_file(Pid, Bucket, list_to_binary(F), filename:join([Dir, F])) + || F <- Files]. + +%% @doc True if the given keys have been really, really deleted. 
+%% Useful when you care about the keys not being there. Delete simply writes
+%% tombstones under the given keys, so those are still seen by key folding
+%% operations.
+pbc_really_deleted(Pid, Bucket, Keys) ->
+    StillThere =
+    fun(K) ->
+            Res = riakc_pb_socket:get(Pid, Bucket, K,
+                                      [{r, 1},
+                                       {notfound_ok, false},
+                                       {basic_quorum, false},
+                                       deletedvclock]),
+            case Res of
+                {error, notfound} ->
+                    false;
+                _ ->
+                    %% Tombstone still around
+                    true
+            end
+    end,
+    [] == lists:filter(StillThere, Keys).
+
+%% @doc PBC-based version of {@link systest_write/1}
+pbc_systest_write(Node, Size) ->
+    pbc_systest_write(Node, Size, 2).
+
+pbc_systest_write(Node, Size, W) ->
+    pbc_systest_write(Node, 1, Size, <<"systest">>, W).
+
+pbc_systest_write(Node, Start, End, Bucket, W) ->
+    rt:wait_for_service(Node, riak_kv),
+    Pid = pbc(Node),
+    F = fun(N, Acc) ->
+                Obj = riakc_obj:new(Bucket, <<N:32/integer>>, <<N:32/integer>>),
+                try riakc_pb_socket:put(Pid, Obj, W) of
+                    ok ->
+                        Acc;
+                    Other ->
+                        [{N, Other} | Acc]
+                catch
+                    What:Why ->
+                        [{N, {What, Why}} | Acc]
+                end
+        end,
+    lists:foldl(F, [], lists:seq(Start, End)).
+
+pbc_systest_read(Node, Size) ->
+    pbc_systest_read(Node, Size, 2).
+
+pbc_systest_read(Node, Size, R) ->
+    pbc_systest_read(Node, 1, Size, <<"systest">>, R).
+
+pbc_systest_read(Node, Start, End, Bucket, R) ->
+    rt:wait_for_service(Node, riak_kv),
+    Pid = pbc(Node),
+    F = fun(N, Acc) ->
+                case riakc_pb_socket:get(Pid, Bucket, <<N:32/integer>>, R) of
+                    {ok, Obj} ->
+                        case riakc_obj:get_value(Obj) of
+                            <<N:32/integer>> ->
+                                Acc;
+                            WrongVal ->
+                                [{N, {wrong_val, WrongVal}} | Acc]
+                        end;
+                    Other ->
+                        [{N, Other} | Acc]
+                end
+        end,
+    lists:foldl(F, [], lists:seq(Start, End)).
diff --git a/tests/bucket_props_roundtrip.erl b/tests/bucket_props_roundtrip.erl index 5febb6c80..7b06fd38b 100644 --- a/tests/bucket_props_roundtrip.erl +++ b/tests/bucket_props_roundtrip.erl @@ -70,7 +70,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> check_prop_set_and_get(Node, Prop, One, Two) -> lager:info("-------- Testing roundtrip for property '~p' ---------", [Prop]), HTTP = rt:httpc(Node), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("HTTP set = ~p", [One]), http_set_property(HTTP, Prop, One), lager:info("PBC get should == ~p", [One]), diff --git a/tests/ensemble_basic3.erl b/tests/ensemble_basic3.erl index f51a82b3a..92953cd63 100644 --- a/tests/ensemble_basic3.erl +++ b/tests/ensemble_basic3.erl @@ -55,17 +55,17 @@ confirm() -> Partitioned = [VNode || {_, VNode} <- PartitionedVN], MajorityVN = All -- PartitionedVN, - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("Partitioning quorum minority: ~p", [Partitioned]), Part = rt:partition(Nodes -- Partitioned, Partitioned), ensemble_util:wait_until_stable(Node, Quorum), lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Healing partition"), rt:heal(Part), @@ -86,7 +86,7 @@ confirm() -> vnode_util:resume_vnode(Pid), ensemble_util:wait_until_stable(Node, Quorum), lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Suspending vnode: ~p", [VIdx]), Pid2 = vnode_util:suspend_vnode(VNode, VIdx), orddict:store(VN, Pid2, Suspended) @@ -96,5 +96,5 @@ confirm() -> [vnode_util:resume_vnode(Pid) || {_, Pid} <- L2], ensemble_util:wait_until_stable(Node, NVal), lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || 
Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], pass. diff --git a/tests/ensemble_basic4.erl b/tests/ensemble_basic4.erl index 494e21c55..35cd92f50 100644 --- a/tests/ensemble_basic4.erl +++ b/tests/ensemble_basic4.erl @@ -48,7 +48,7 @@ confirm() -> PartitionedVN = lists:sublist(Other, Minority), Partitioned = [VNode || {_, VNode} <- PartitionedVN], - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("Partitioning quorum minority: ~p", [Partitioned]), Part = rt:partition(Nodes -- Partitioned, Partitioned), @@ -56,10 +56,10 @@ confirm() -> ensemble_util:wait_until_stable(Node, Quorum), lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Healing partition"), rt:heal(Part), diff --git a/tests/ensemble_interleave.erl b/tests/ensemble_interleave.erl index 5e34659f6..105bffffa 100644 --- a/tests/ensemble_interleave.erl +++ b/tests/ensemble_interleave.erl @@ -65,7 +65,7 @@ confirm() -> [KillFirst,KillSecond|Suspend] = All -- PartitionedVN, io:format("PL: ~p~n", [PL]), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), Options = [{timeout, 500}], rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), @@ -73,10 +73,10 @@ confirm() -> ensemble_util:wait_until_stable(Node, Quorum), lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], rt:heal(Part), [begin @@ -97,5 +97,5 @@ confirm() -> lager:info("Re-reading keys to verify they exist"), Expect = [ok, {error, timeout}, {error, 
<<"timeout">>}, {error, <<"failed">>}], - [rt:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys], + [rt_pb:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys], pass. diff --git a/tests/ensemble_remove_node2.erl b/tests/ensemble_remove_node2.erl index 3efaa7ace..9d3a6945a 100644 --- a/tests/ensemble_remove_node2.erl +++ b/tests/ensemble_remove_node2.erl @@ -44,9 +44,9 @@ confirm() -> ensemble_util:wait_until_stable(Node, NVal), Bucket = {<<"strong">>, <<"test">>}, Key = <<"testkey">>, - PBC = rt:pbc(Node), - ok = rt:pbc_write(PBC, Bucket, Key, testval), - Val1 = rt:pbc_read(PBC, Bucket, Key), + PBC = rt_pb:pbc(Node), + ok = rt_pb:pbc_write(PBC, Bucket, Key, testval), + Val1 = rt_pb:pbc_read(PBC, Bucket, Key), ?assertEqual(element(1, Val1), riakc_obj), %% Don't allow node deletions in riak_ensemble. This should prevent the @@ -80,7 +80,7 @@ confirm() -> %% We should still be able to read from k/v ensembles, but the nodes should %% never exit lager:info("Reading From SC Bucket"), - Val2 = rt:pbc_read(PBC, Bucket, Key), + Val2 = rt_pb:pbc_read(PBC, Bucket, Key), ?assertEqual(element(1, Val2), riakc_obj), ok = ensemble_util:wait_until_stable(Node, NVal), diff --git a/tests/ensemble_sync.erl b/tests/ensemble_sync.erl index 0fcc66ccf..6fa89feac 100644 --- a/tests/ensemble_sync.erl +++ b/tests/ensemble_sync.erl @@ -71,7 +71,7 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> {AfterVN, _} = lists:split(NumValid, Valid3), io:format("PL: ~p~n", [PL]), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), Options = [{timeout, 2000}], rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), @@ -80,10 +80,10 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> %% Write data while minority is partitioned lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], 
lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], rt:heal(Part), %% Suspend desired number of valid vnodes @@ -104,7 +104,7 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> ensemble_util:wait_until_stable(Node, Quorum), lager:info("Checking that key results match scenario"), - [rt:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys], + [rt_pb:pbc_read_check(PBC, Bucket, Key, Expect, Options) || Key <- Keys], lager:info("Re-enabling AAE"), rpc:multicall(Nodes, riak_kv_entropy_manager, enable, []), @@ -120,7 +120,7 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> ok; false -> lager:info("Re-reading keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys] + [rt_pb:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys] end, lager:info("Scenario passed"), diff --git a/tests/ensemble_vnode_crash.erl b/tests/ensemble_vnode_crash.erl index 59fbd56e8..7bd59bdf0 100644 --- a/tests/ensemble_vnode_crash.erl +++ b/tests/ensemble_vnode_crash.erl @@ -46,13 +46,13 @@ confirm() -> PL = rpc:call(Node, riak_core_apl, get_primary_apl, [DocIdx, NVal, riak_kv]), {{Key1Idx, Key1Node}, _} = hd(PL), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("Writing ~p consistent keys", [1000]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], lager:info("Read keys to verify they exist"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], %% Setting up intercept to ensure that %% riak_kv_ensemble_backend:handle_down/4 gets called when a vnode or vnode @@ -79,7 +79,7 @@ confirm() -> lager:info("Wait for stable ensembles"), ensemble_util:wait_until_stable(Node, NVal), lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], 
+ [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Killing Vnode Proxy for Key1"), Proxy = rpc:call(Key1Node, riak_core_vnode_proxy, reg_name, [riak_kv_vnode, @@ -95,7 +95,7 @@ confirm() -> lager:info("Wait for stable ensembles"), ensemble_util:wait_until_stable(Node, NVal), lager:info("Re-reading keys"), - [rt:pbc_read(PBC, Bucket, Key) || Key <- Keys], + [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], pass. diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index 508174160..8966143d1 100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -79,7 +79,7 @@ confirm() -> <<"node_put_fsm_time_100">>]), lager:info("Make PBC Connection"), - Pid = rt:pbc(Node1), + Pid = rt_pb:pbc(Node1), JMX3 = jmx_dump(JMXDumpCmd), rt:systest_write(Node1, 1), @@ -89,7 +89,7 @@ confirm() -> {<<"pbc_active">>, 1}]), lager:info("Force Read Repair"), - rt:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>), + rt_pb:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>), rt:pbc_set_bucket_prop(Pid, <<"testbucket">>, [{n_val, 4}]), JMX4 = jmx_dump(JMXDumpCmd), @@ -97,7 +97,7 @@ confirm() -> verify_inc(JMX3, JMX4, [{<<"read_repairs_total">>, 0}, {<<"read_repairs">>, 0}]), - _Value = rt:pbc_read(Pid, <<"testbucket">>, <<"1">>), + _Value = rt_pb:pbc_read(Pid, <<"testbucket">>, <<"1">>), %%Stats5 = get_stats(Node1), JMX5 = jmx_dump(JMXDumpCmd), diff --git a/tests/loaded_upgrade.erl b/tests/loaded_upgrade.erl index 50e46e1ab..7e89350fb 100644 --- a/tests/loaded_upgrade.erl +++ b/tests/loaded_upgrade.erl @@ -145,7 +145,7 @@ bucket(mapred) -> <<"bryanitbs">>; bucket(search) -> <<"scotts_spam">>. 
seed_search(Node) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), SpamDir = rt_config:get(spam_dir), Files = case SpamDir of undefined -> undefined; @@ -157,7 +157,7 @@ seed_search(Node) -> seed_search(_Pid, []) -> ok; seed_search(Pid, [File|Files]) -> Key = list_to_binary(filename:basename(File)), - rt:pbc_put_file(Pid, bucket(search), Key, File), + rt_pb:pbc_put_file(Pid, bucket(search), Key, File), seed_search(Pid, Files). kv_seed(Node) -> @@ -201,7 +201,7 @@ mr_seed(Node) -> seed(Node, 0, 9999, ValFun). seed(Node, Start, End, ValFun) -> - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), [ begin Obj = ValFun(Key), diff --git a/tests/loaded_upgrade_worker_sup.erl b/tests/loaded_upgrade_worker_sup.erl index 3e62c440c..c1d1c5c26 100644 --- a/tests/loaded_upgrade_worker_sup.erl +++ b/tests/loaded_upgrade_worker_sup.erl @@ -237,12 +237,12 @@ assert_equal(Expected, Actual) -> Actual == Expected. pb_pid_recycler(undefined, Node) -> - rt:pbc(Node); + rt_pb:pbc(Node); pb_pid_recycler(Pid, Node) -> case riakc_pb_socket:is_connected(Pid) of true -> Pid; _ -> riakc_pb_socket:stop(Pid), - rt:pbc(Node) + rt_pb:pbc(Node) end. 
diff --git a/tests/mapred_basic_compat.erl b/tests/mapred_basic_compat.erl index 6b827b7fa..ee7f9b91f 100644 --- a/tests/mapred_basic_compat.erl +++ b/tests/mapred_basic_compat.erl @@ -89,7 +89,7 @@ load_test_data([Node|_]) -> [{<<"link 1">>, [{?LINK_BUCKET, <<"nokey-1">>}]}, {<<"link 2">>, [{?LINK_BUCKET, <<"nokey-2">>}]}]), - C = rt:pbc(Node), + C = rt_pb:pbc(Node), ok = riakc_pb_socket:put(C, riakc_obj:update_metadata(Obj, MD)), %% Some bucket type entries {mytype,foonum}/bar{1..10} diff --git a/tests/mapred_javascript.erl b/tests/mapred_javascript.erl index fedfadf5c..301c494f0 100644 --- a/tests/mapred_javascript.erl +++ b/tests/mapred_javascript.erl @@ -67,7 +67,7 @@ load_test_data([Node|_]) -> Map = riakc_obj:new(?JS_BUCKET, <<"map">>, ?MAP_JS, "text/plain"), Red = riakc_obj:new(?JS_BUCKET, <<"reduce">>, ?REDUCE_JS, "text/plain"), - C = rt:pbc(Node), + C = rt_pb:pbc(Node), ok = riakc_pb_socket:put(C, Map), ok = riakc_pb_socket:put(C, Red), riakc_pb_socket:stop(C). diff --git a/tests/mapred_notfound_failover.erl b/tests/mapred_notfound_failover.erl index 488807f01..b0e8fad9f 100644 --- a/tests/mapred_notfound_failover.erl +++ b/tests/mapred_notfound_failover.erl @@ -78,7 +78,7 @@ replica_notfound(Node, {HashMod, HashFun}, MissingBucket, MissingKey, MissingValue) -> %% create a value for the "missing" key Obj = riakc_obj:new(MissingBucket, MissingKey, MissingValue), - C = rt:pbc(Node), + C = rt_pb:pbc(Node), ok = riakc_pb_socket:put(C, Obj, [{w, 3}]), riakc_pb_socket:stop(C), %% and now kill the first replica; this will make the vnode local diff --git a/tests/mapred_search_switch.erl b/tests/mapred_search_switch.erl index 1b9bbfc8c..e34951b76 100644 --- a/tests/mapred_search_switch.erl +++ b/tests/mapred_search_switch.erl @@ -282,7 +282,7 @@ got_error(_) -> false. 
run_bucket_mr([Node|_], Bucket, Common) -> - C = rt:pbc(Node), + C = rt_pb:pbc(Node), riakc_pb_socket:mapred( C, %% TODO: check {search, Bucket, Common, Filter} diff --git a/tests/overload.erl b/tests/overload.erl index 63f6a4040..41f092283 100644 --- a/tests/overload.erl +++ b/tests/overload.erl @@ -163,7 +163,7 @@ test_cover_queries_overload(Nodes) -> wait_for_all_vnode_queues_empty(Node2). list_keys(Node) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), riakc_pb_socket:list_keys(Pid, ?BUCKET, 30000). list_buckets(Node) -> diff --git a/tests/partition_repair.erl b/tests/partition_repair.erl index 757aabf85..b3223eb3a 100644 --- a/tests/partition_repair.erl +++ b/tests/partition_repair.erl @@ -86,8 +86,8 @@ confirm() -> rt:enable_search_hook(hd(Nodes), Bucket), lager:info("Insert Scott's spam emails"), - Pbc = rt:pbc(hd(Nodes)), - rt:pbc_put_dir(Pbc, Bucket, SpamDir), + Pbc = rt_pb:pbc(hd(Nodes)), + rt_pb:pbc_put_dir(Pbc, Bucket, SpamDir), lager:info("Stash ITFs for each partition"), %% @todo Should riak_test guarantee that the scratch pad is clean instead? diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index fdb655df0..31c360984 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -340,8 +340,8 @@ difference_test() -> connect_cluster(LeaderA, BPort, "B"), %% Get PBC connections. - APBC = rt:pbc(LeaderA), - BPBC = rt:pbc(LeaderB), + APBC = rt_pb:pbc(LeaderA), + BPBC = rt_pb:pbc(LeaderB), %% Write key. 
ok = riakc_pb_socket:put(APBC, diff --git a/tests/replication/repl_bucket_types.erl b/tests/replication/repl_bucket_types.erl index 17e3f3c18..1a78bdd82 100644 --- a/tests/replication/repl_bucket_types.erl +++ b/tests/replication/repl_bucket_types.erl @@ -20,8 +20,8 @@ setup(Type) -> {LeaderA, LeaderB, ANodes, BNodes} = ClusterNodes = make_clusters(Type), - PBA = rt:pbc(LeaderA), - PBB = rt:pbc(LeaderB), + PBA = rt_pb:pbc(LeaderA), + PBB = rt_pb:pbc(LeaderB), {DefinedType, UndefType} = Types = {<<"working_type">>, <<"undefined_type">>}, diff --git a/tests/replication/repl_reduced.erl b/tests/replication/repl_reduced.erl index 50af2cf5c..989c90458 100644 --- a/tests/replication/repl_reduced.erl +++ b/tests/replication/repl_reduced.erl @@ -103,7 +103,7 @@ data_push_test_() -> {"repl works", timeout, rt_cascading:timeout(1000), fun() -> #data_push_test{c123 = [N1 | _]} = State, - Client123 = rt:pbc(N1), + Client123 = rt_pb:pbc(N1), Bin = <<"data data data">>, Key = <<"derkey">>, Bucket = <<"kicked">>, @@ -129,7 +129,7 @@ data_push_test_() -> end, [rt:wait_until(Node, WaitFun) || Node <- State#data_push_test.c456], lager:info("putting an object on ~p", [N1]), - Client123 = rt:pbc(N1), + Client123 = rt_pb:pbc(N1), Bin = <<"before repl reduction, this is a binary">>, Key = <<"the key">>, Bucket = <<"objects">>, @@ -157,7 +157,7 @@ data_push_test_() -> Got =:= never end, [rt:wait_until(Node, WaitFun) || Node <- State#data_push_test.c456], - Client123 = rt:pbc(N1), + Client123 = rt_pb:pbc(N1), Bin = <<"only carry reduced objects">>, Key = <<"ocro">>, Bucket = <<"objects">>, @@ -181,7 +181,7 @@ data_push_test_() -> Got =:= never end, [rt:wait_until(Node, WaitFun) || Node <- State#data_push_test.c456], - Client123 = rt:pbc(N1), + Client123 = rt_pb:pbc(N1), Bin = <<"only carry reduced objects">>, Key = <<"ocro2">>, Bucket = <<"objects">>, @@ -249,7 +249,7 @@ read_repair_interaction_test_() -> end, [rt:wait_until(Node, WaitFun) || Node <- State#data_push_test.c456], 
lager:info("putting an object on ~p", [N1]), - Client123 = rt:pbc(N1), + Client123 = rt_pb:pbc(N1), Bin = <<"before repl reduction, this is a binary">>, Key = <<"rrit">>, Bucket = <<"rrit_objects">>, @@ -280,7 +280,7 @@ read_repair_interaction_test_() -> ]}) end, State#data_push_test.c456), [N4 | _] = State#data_push_test.c456, - Client456 = rt:pbc(N4), + Client456 = rt_pb:pbc(N4), % set the nval higher, which make the below have read repair % end up being forced @@ -421,7 +421,7 @@ read_repair_interaction_test_() -> ?assertMatch({ok, _}, Error) end, - Client456 = rt:pbc(hd(State#data_push_test.c456)), + Client456 = rt_pb:pbc(hd(State#data_push_test.c456)), riakc_pb_socket:set_bucket(Client456, Bucket, [{n_val, 5}]), riakc_pb_socket:stop(Client456), @@ -462,7 +462,7 @@ exists(Nodes, Bucket, Key) -> exists(Got, [], _Bucket, _Key) -> Got; exists({error, notfound}, [Node | Tail], Bucket, Key) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Got = riakc_pb_socket:get(Pid, Bucket, Key, [{pr, 1}]), riakc_pb_socket:stop(Pid), exists(Got, Tail, Bucket, Key); @@ -538,7 +538,7 @@ read(SocketQueue, N, Stop, Bucket, AssertFun) -> make_socket_queue(Nodes) -> Sockets = lists:map(fun(Node) -> - rt:pbc(Node) + rt_pb:pbc(Node) end, Nodes), queue:from_list(Sockets). 
diff --git a/tests/replication/replication2.erl b/tests/replication/replication2.erl index 5aae6a0ec..702e59296 100644 --- a/tests/replication/replication2.erl +++ b/tests/replication/replication2.erl @@ -469,7 +469,7 @@ pb_write_during_shutdown(Target, BSecond, TestBucket) -> ConnInfo = proplists:get_value(Target, rt:connection_info([Target])), {IP, Port} = proplists:get_value(pb, ConnInfo), lager:info("Connecting to pb socket ~p:~p on ~p", [IP, Port, Target]), - PBSock = rt:pbc(Target), + PBSock = rt_pb:pbc(Target), %% do the stop in the background while we're writing keys spawn(fun() -> diff --git a/tests/replication/replication2_pg.erl b/tests/replication/replication2_pg.erl index 95f71e1a7..01d96474f 100644 --- a/tests/replication/replication2_pg.erl +++ b/tests/replication/replication2_pg.erl @@ -172,20 +172,20 @@ test_basic_pg(Mode, SSL) -> EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) end, - PidA = rt:pbc(LeaderA), + PidA = rt_pb:pbc(LeaderA), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), {Bucket, KeyA, ValueA} = make_test_object("a"), {Bucket, KeyB, ValueB} = make_test_object("b"), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), _FirstA = hd(ANodes), FirstB = hd(BNodes), FirstC = hd(CNodes), - PidB = rt:pbc(FirstB), + PidB = rt_pb:pbc(FirstB), lager:info("Connected to cluster B"), {ok, PGResult} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), ?assertEqual(ValueA, riakc_obj:get_value(PGResult)), @@ -237,7 +237,7 @@ test_basic_pg(Mode, SSL) -> EnabledFor3 -> lager:info("PG enabled for cluster ~p",[EnabledFor3]) end, - PidC = rt:pbc(FirstC), + PidC = rt_pb:pbc(FirstC), Options = [{n_val, 1}, {sloppy_quorum, false}], lager:info("Test proxy get from C using options: ~p", [Options]), @@ -299,16 +299,16 @@ test_12_pg(Mode, SSL) -> end, 
[rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - PidA = rt:pbc(LeaderA), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), + PidA = rt_pb:pbc(LeaderA), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), LeaderB = rpc:call(FirstB, riak_repl2_leader, leader_node, []), rt:log_to_nodes([LeaderB], "Trying to use PG while it's disabled"), - PidB = rt:pbc(LeaderB), + PidB = rt_pb:pbc(LeaderB), ?assertEqual({error, notfound}, riak_repl_pb_api:get(PidB, Bucket, KeyA, CidA)), @@ -332,7 +332,7 @@ test_12_pg(Mode, SSL) -> lager:info("Trying proxy_get"), LeaderB2 = rpc:call(FirstB, riak_repl2_leader, leader_node, []), - PidB2 = rt:pbc(LeaderB2), + PidB2 = rt_pb:pbc(LeaderB2), {ok, PGResult} = riak_repl_pb_api:get(PidB2, Bucket, KeyB, CidA), lager:info("PGResult: ~p", [PGResult]), ?assertEqual(ValueB, riakc_obj:get_value(PGResult)), @@ -388,7 +388,7 @@ test_pg_proxy(SSL) -> EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) end, - PidA = rt:pbc(LeaderA), + PidA = rt_pb:pbc(LeaderA), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), @@ -398,16 +398,16 @@ test_pg_proxy(SSL) -> {Bucket, KeyC, ValueC} = make_test_object("c"), {Bucket, KeyD, ValueD} = make_test_object("d"), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), - rt:pbc_write(PidA, Bucket, KeyD, ValueD), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidA, Bucket, KeyC, ValueC), + rt_pb:pbc_write(PidA, Bucket, KeyD, ValueD), %% sanity check. You know, like the 10000 tests that autoconf runs %% before it actually does any work. 
FirstA = hd(ANodes), FirstB = hd(BNodes), _FirstC = hd(CNodes), - PidB = rt:pbc(FirstB), + PidB = rt_pb:pbc(FirstB), lager:info("Connected to cluster B"), {ok, PGResult} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), ?assertEqual(ValueA, riakc_obj:get_value(PGResult)), @@ -421,7 +421,7 @@ test_pg_proxy(SSL) -> rt:stop(PGLeaderB), [RunningBNode | _ ] = BNodes -- [PGLeaderB], repl_util:wait_until_leader(RunningBNode), - PidB2 = rt:pbc(RunningBNode), + PidB2 = rt_pb:pbc(RunningBNode), lager:info("Now trying proxy_get"), ?assertEqual(ok, wait_until_pg(RunningBNode, PidB2, Bucket, KeyC, CidA)), lager:info("If you got here, proxy_get worked after the pg block requesting leader was killed"), @@ -487,15 +487,15 @@ test_cluster_mapping(SSL) -> [rt:wait_until_ring_converged(Ns) || Ns <- [ANodes, BNodes, CNodes]], - PidA = rt:pbc(LeaderA), + PidA = rt_pb:pbc(LeaderA), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), - PidB = rt:pbc(LeaderB), + PidB = rt_pb:pbc(LeaderB), {ok,CidB}=riak_repl_pb_api:get_clusterid(PidB), lager:info("Cluster ID for B = ~p", [CidB]), - PidC = rt:pbc(LeaderC), + PidC = rt_pb:pbc(LeaderC), {ok,CidC}=riak_repl_pb_api:get_clusterid(PidC), lager:info("Cluster ID for C = ~p", [CidC]), @@ -505,10 +505,10 @@ test_cluster_mapping(SSL) -> {Bucket, KeyC, ValueC} = make_test_object("c"), {Bucket, KeyD, ValueD} = make_test_object("d"), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), - rt:pbc_write(PidA, Bucket, KeyD, ValueD), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidA, Bucket, KeyC, ValueC), + rt_pb:pbc_write(PidA, Bucket, KeyD, ValueD), {ok, PGResult} = riak_repl_pb_api:get(PidA,Bucket,KeyA,CidA), @@ -605,8 +605,8 @@ test_bidirectional_pg(SSL) -> EnabledForB -> lager:info("PG enabled for cluster ~p",[EnabledForB]) end, - PidA = rt:pbc(LeaderA), - PidB = 
rt:pbc(FirstB), + PidA = rt_pb:pbc(LeaderA), + PidB = rt_pb:pbc(FirstB), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), {ok,CidB}=riak_repl_pb_api:get_clusterid(PidB), @@ -617,10 +617,10 @@ test_bidirectional_pg(SSL) -> {Bucket, KeyB, ValueB} = make_test_object("b"), %% write some data to cluster A - rt:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), %% write some data to cluster B - rt:pbc_write(PidB, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidB, Bucket, KeyB, ValueB), lager:info("Trying first get"), wait_until_pg(LeaderB, PidB, Bucket, KeyA, CidA), @@ -667,22 +667,22 @@ test_multiple_sink_pg(SSL) -> EnabledForC -> lager:info("PG enabled for cluster ~p",[EnabledForC]) end, - PidA = rt:pbc(LeaderA), + PidA = rt_pb:pbc(LeaderA), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), {Bucket, KeyA, ValueA} = make_test_object("a"), {Bucket, KeyB, ValueB} = make_test_object("b"), - rt:pbc_write(PidA, Bucket, KeyA, ValueA), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidA, Bucket, KeyA, ValueA), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), _FirstA = hd(ANodes), FirstB = hd(BNodes), FirstC = hd(CNodes), - PidB = rt:pbc(FirstB), - PidC = rt:pbc(FirstC), + PidB = rt_pb:pbc(FirstB), + PidC = rt_pb:pbc(FirstC), {ok, PGResultB} = riak_repl_pb_api:get(PidB,Bucket,KeyA,CidA), ?assertEqual(ValueA, riakc_obj:get_value(PGResultB)), @@ -720,15 +720,15 @@ test_mixed_pg(SSL) -> EnabledFor -> lager:info("PG enabled for cluster ~p",[EnabledFor]) end, - PidA = rt:pbc(LeaderA), + PidA = rt_pb:pbc(LeaderA), {ok,CidA}=riak_repl_pb_api:get_clusterid(PidA), lager:info("Cluster ID for A = ~p", [CidA]), {Bucket, KeyB, ValueB} = make_test_object("b"), {Bucket, KeyC, ValueC} = make_test_object("c"), - rt:pbc_write(PidA, Bucket, KeyB, ValueB), - rt:pbc_write(PidA, Bucket, KeyC, ValueC), + rt_pb:pbc_write(PidA, Bucket, KeyB, ValueB), + rt_pb:pbc_write(PidA, Bucket, KeyC, ValueC), _FirstA = hd(ANodes), 
FirstB = hd(BNodes), @@ -756,8 +756,8 @@ test_mixed_pg(SSL) -> lager:info("Trying proxy_get"), LeaderC = rpc:call(FirstC, riak_repl2_leader, leader_node, []), - PidB = rt:pbc(FirstB), - PidC = rt:pbc(LeaderC), + PidB = rt_pb:pbc(FirstB), + PidC = rt_pb:pbc(LeaderC), {ok, PGResultB} = riak_repl_pb_api:get(PidB, Bucket, KeyB, CidA), lager:info("PGResultB: ~p", [PGResultB]), @@ -914,15 +914,15 @@ verify_topology_change(SourceNodes, SinkNodes) -> %% Get connections [SourceNode1, _SourceNode2] = SourceNodes, - SourceNode1Pid = rt:pbc(SourceNode1), + SourceNode1Pid = rt_pb:pbc(SourceNode1), [SinkNode1, SinkNode2] = SinkNodes, - SinkNode1Pid = rt:pbc(SinkNode1), + SinkNode1Pid = rt_pb:pbc(SinkNode1), {ok, SourceCid} = riak_repl_pb_api:get_clusterid(SourceNode1Pid), %% Write new object to source. lager:info("Writing key 'before' to the source."), {Bucket, KeyBefore, ValueBefore} = make_test_object("before"), - rt:pbc_write(SourceNode1Pid, Bucket, KeyBefore, ValueBefore), + rt_pb:pbc_write(SourceNode1Pid, Bucket, KeyBefore, ValueBefore), %% Verify proxy_get through the sink works. lager:info("Verifying key 'before' can be read through the sink."), @@ -993,7 +993,7 @@ verify_topology_change(SourceNodes, SinkNodes) -> lager:info("Writing key 'after' to the source."), {ok, SourceCid} = riak_repl_pb_api:get_clusterid(SourceNode1Pid), {Bucket, KeyPost, ValuePost} = make_test_object("after"), - rt:pbc_write(SourceNode1Pid, Bucket, KeyPost, ValuePost), + rt_pb:pbc_write(SourceNode1Pid, Bucket, KeyPost, ValuePost), %% Verify we can retrieve from source. 
lager:info("Verifying key 'after' can be read through the source."), diff --git a/tests/replication/rt_cascading.erl b/tests/replication/rt_cascading.erl index c8827c6b3..804aba5f5 100644 --- a/tests/replication/rt_cascading.erl +++ b/tests/replication/rt_cascading.erl @@ -97,7 +97,7 @@ simple_test_() -> end}, {"cascade a put from beginning down to ending", timeout, timeout(25), fun() -> - BeginningClient = rt:pbc(State#simple_state.beginning), + BeginningClient = rt_pb:pbc(State#simple_state.beginning), Bin = <<"cascading realtime">>, Obj = riakc_obj:new(<<"objects">>, Bin, Bin), riakc_pb_socket:put(BeginningClient, Obj, [{w,1}]), @@ -110,7 +110,7 @@ simple_test_() -> rpc:call(State#simple_state.middle, riak_repl_console, realtime_cascades, [["never"]]), Bin = <<"disabled cascading">>, Obj = riakc_obj:new(?bucket, Bin, Bin), - Client = rt:pbc(State#simple_state.beginning), + Client = rt_pb:pbc(State#simple_state.beginning), riakc_pb_socket:put(Client, Obj, [{w,1}]), riakc_pb_socket:stop(Client), ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.middle, ?bucket, Bin)), @@ -122,7 +122,7 @@ simple_test_() -> rpc:call(State#simple_state.middle, riak_repl_console, realtime_cascades, [["always"]]), Bin = <<"cascading re-enabled">>, Obj = riakc_obj:new(?bucket, Bin, Bin), - Client = rt:pbc(State#simple_state.beginning), + Client = rt_pb:pbc(State#simple_state.beginning), riakc_pb_socket:put(Client, Obj, [{w,1}]), riakc_pb_socket:stop(Client), ?assertEqual(Bin, maybe_eventually_exists(State#simple_state.middle, ?bucket, Bin)), @@ -187,7 +187,7 @@ big_circle_test_() -> {"circle it", timeout, timeout(65), fun() -> [One | _] = Nodes, - C = rt:pbc(One), + C = rt_pb:pbc(One), Bin = <<"goober">>, Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -208,7 +208,7 @@ big_circle_test_() -> connect_rt(Node, Port, ConnectToName) end, lists:map(Connect, lists:zip(Nodes, ConnectTo)), - C = rt:pbc(hd(Nodes)), + C = rt_pb:pbc(hd(Nodes)), Bin = <<"2 way repl">>, 
Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -285,7 +285,7 @@ circle_test_() -> fun(Nodes) -> [ {"cascade all the way to the other end, but no further", timeout, timeout(12), fun() -> - Client = rt:pbc(hd(Nodes)), + Client = rt_pb:pbc(hd(Nodes)), Bin = <<"cascading">>, Obj = riakc_obj:new(<<"objects">>, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w,1}]), @@ -301,7 +301,7 @@ circle_test_() -> {"cascade starting at a different point", timeout, timeout(12), fun() -> [One, Two | _] = Nodes, - Client = rt:pbc(Two), + Client = rt_pb:pbc(Two), Bin = <<"start_at_two">>, Obj = riakc_obj:new(<<"objects">>, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w,1}]), @@ -355,7 +355,7 @@ pyramid_test_() -> {"Cascade to both kids", timeout, timeout(65), fun() -> [Top | _] = Nodes, - Client = rt:pbc(Top), + Client = rt_pb:pbc(Top), Bucket = <<"objects">>, Bin = <<"pyramid_top">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -409,7 +409,7 @@ diamond_test_() -> {"unfortunate double write", timeout, timeout(135), fun() -> [Top, MidLeft, MidRight, Bottom] = Nodes, - Client = rt:pbc(Top), + Client = rt_pb:pbc(Top), Bin = <<"start_at_top">>, Obj = riakc_obj:new(<<"objects">>, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w,1}]), @@ -444,7 +444,7 @@ diamond_test_() -> [Sink] = proplists:get_value(sinks, Status, [[]]), ExpectSeq = proplists:get_value(expect_seq, Sink), - Client = rt:pbc(MidRight), + Client = rt_pb:pbc(MidRight), Bin = <<"start at midright">>, Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -499,7 +499,7 @@ circle_and_spurs_test_() -> {"start at north", timeout, timeout(55), fun() -> [North | _Rest] = Nodes, - Client = rt:pbc(North), + Client = rt_pb:pbc(North), Bin = <<"start at north">>, Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -512,7 +512,7 @@ circle_and_spurs_test_() -> {"Start at west", timeout, timeout(55), fun() -> [_North, _East, West | _Rest] = Nodes, - Client = rt:pbc(West), + Client = rt_pb:pbc(West), Bin 
= <<"start at west">>, Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -525,7 +525,7 @@ circle_and_spurs_test_() -> {"spurs don't replicate back", timeout, timeout(55), fun() -> [_North, _East, _West, NorthSpur | _Rest] = Nodes, - Client = rt:pbc(NorthSpur), + Client = rt_pb:pbc(NorthSpur), Bin = <<"start at north_spur">>, Bucket = <<"objects">>, Obj = riakc_obj:new(Bucket, Bin, Bin), @@ -618,7 +618,7 @@ mixed_version_clusters_test_dep() -> {"no cascading at first", timeout, timeout(35), [ {timeout, timeout(15), fun() -> - Client = rt:pbc(N1), + Client = rt_pb:pbc(N1), Bin = <<"no cascade yet">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 2}]), @@ -628,7 +628,7 @@ mixed_version_clusters_test_dep() -> end}, {timeout, timeout(15), fun() -> - Client = rt:pbc(N2), + Client = rt_pb:pbc(N2), Bin = <<"no cascade yet 2">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 2}]), @@ -672,7 +672,7 @@ mixed_version_clusters_test_dep() -> fun(_) -> [ {"node1 put", timeout, timeout(205), fun() -> - Client = rt:pbc(N1), + Client = rt_pb:pbc(N1), Bin = <<"rt after upgrade">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 2}]), @@ -682,7 +682,7 @@ mixed_version_clusters_test_dep() -> end}, {"node2 put", timeout, timeout(25), fun() -> - Client = rt:pbc(N2), + Client = rt_pb:pbc(N2), Bin = <<"rt after upgrade 2">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 2}]), @@ -735,7 +735,7 @@ Reses)]), ExistsLookup = NewHead ++ NewTail, Test = fun() -> ?debugFmt("Running test ~p", [Name]), - Client = rt:pbc(Node), + Client = rt_pb:pbc(Node), Key = <<(ToB(Node))/binary, "-write-", (ToB(N))/binary>>, Obj = riakc_obj:new(?bucket, Key, Key), riakc_pb_socket:put(Client, Obj, [{w, 2}]), @@ -823,7 +823,7 @@ new_to_old_test_dep() -> ([New1, Old2, New3]) -> [ {"From new1 to old2", timeout, timeout(25), fun() -> - Client = rt:pbc(New1), + Client = 
rt_pb:pbc(New1), Bin = <<"new1 to old2">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 1}]), @@ -833,7 +833,7 @@ new_to_old_test_dep() -> end}, {"old2 does not cascade at all", timeout, timeout(25), fun() -> - Client = rt:pbc(New1), + Client = rt_pb:pbc(New1), Bin = <<"old2 no cascade">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 1}]), @@ -843,7 +843,7 @@ new_to_old_test_dep() -> end}, {"from new3 to old2", timeout, timeout(25), fun() -> - Client = rt:pbc(New3), + Client = rt_pb:pbc(New3), Bin = <<"new3 to old2">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w, 1}]), @@ -857,7 +857,7 @@ new_to_old_test_dep() -> % from an older source cluster/node. It is prevented for now by % having no easy/good way to get the name of the source cluster, % thus preventing complete information on the routed clusters. - Client = rt:pbc(Old2), + Client = rt_pb:pbc(Old2), Bin = <<"old2 to new3">>, Obj = riakc_obj:new(?bucket, Bin, Bin), riakc_pb_socket:put(Client, Obj, [{w,1}]), @@ -1132,7 +1132,7 @@ exists(Nodes, Bucket, Key) -> exists(Got, [], _Bucket, _Key) -> Got; exists({error, notfound}, [Node | Tail], Bucket, Key) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Got = riakc_pb_socket:get(Pid, Bucket, Key), riakc_pb_socket:stop(Pid), exists(Got, Tail, Bucket, Key); diff --git a/tests/secondary_index_tests.erl b/tests/secondary_index_tests.erl index 26827c6d5..7d66dcc52 100644 --- a/tests/secondary_index_tests.erl +++ b/tests/secondary_index_tests.erl @@ -49,7 +49,7 @@ config() -> confirm(#rt_properties{nodes=Nodes}, _MD) -> Bucket = druuid:v4_str(), lager:info("Bucket: ~p", [Bucket]), - PBC = rt:pbc(hd(Nodes)), + PBC = rt_pb:pbc(hd(Nodes)), HTTPC = rt:httpc(hd(Nodes)), Clients = [{pb, PBC}, {http, HTTPC}], @@ -70,7 +70,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> ToDel = [<<"obj05">>, <<"obj11">>], [?assertMatch(ok, riakc_pb_socket:delete(PBC, Bucket, KD)) || KD <- ToDel], 
lager:info("Make sure the tombstone is reaped..."), - ?assertMatch(ok, rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, ToDel) end)), + ?assertMatch(ok, rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBC, Bucket, ToDel) end)), assertExactQuery(Clients, Bucket, [], <<"field1_bin">>, <<"val5">>), assertExactQuery(Clients, Bucket, [], <<"field2_int">>, 5), diff --git a/tests/sibling_explosion.erl b/tests/sibling_explosion.erl index 68b8760a9..510742ab8 100644 --- a/tests/sibling_explosion.erl +++ b/tests/sibling_explosion.erl @@ -25,7 +25,7 @@ confirm() -> N = 100, lager:info("Put new object in ~p via PBC.", [Node1]), - PB = rt:pbc(Node1), + PB = rt_pb:pbc(Node1), A0 = riakc_obj:new(<<"b">>, <<"k">>, sets:from_list([0])), B0 = riakc_obj:new(<<"b">>, <<"k">>, sets:from_list([1])), diff --git a/tests/verify_2i_aae.erl b/tests/verify_2i_aae.erl index 39b7709ef..5ac070c36 100644 --- a/tests/verify_2i_aae.erl +++ b/tests/verify_2i_aae.erl @@ -44,7 +44,7 @@ confirm() -> {{diff_index_specs, 2}, skippable_diff_index_specs}]}), lager:info("Installed intercepts to corrupt index specs on node ~p", [Node1]), %%rpc:call(Node1, lager, set_loglevel, [lager_console_backend, debug]), - PBC = rt:pbc(Node1), + PBC = rt_pb:pbc(Node1), NumItems = ?NUM_ITEMS, NumDel = ?NUM_DELETES, pass = check_lost_objects(Node1, PBC, NumItems, NumDel), @@ -87,7 +87,7 @@ check_lost_objects(Node1, PBC, NumItems, NumDel) -> lager:info("Deleting ~b objects without updating indexes", [NumDel]), [del_obj(PBC, Bucket, N) || N <- DelRange, Bucket <- ?BUCKETS], DelKeys = [to_key(N) || N <- DelRange], - [rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, DelKeys) end) + [rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBC, Bucket, DelKeys) end) || Bucket <- ?BUCKETS], %% Verify they are damaged lager:info("Verify change did not take, needs repair"), diff --git a/tests/verify_2i_limit.erl b/tests/verify_2i_limit.erl index 66a7e7a9c..064feb227 100644 --- a/tests/verify_2i_limit.erl +++ 
b/tests/verify_2i_limit.erl @@ -36,7 +36,7 @@ confirm() -> RiakHttp = rt:httpc(hd(Nodes)), HttpUrl = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), + PBPid = rt_pb:pbc(hd(Nodes)), [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], @@ -89,7 +89,7 @@ confirm() -> %% gh611 - equals query pagination riakc_pb_socket:delete(PBPid, ?BUCKET, <<"bob">>), - rt:wait_until(fun() -> rt:pbc_really_deleted(PBPid, ?BUCKET, [<<"bob">>]) end), + rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBPid, ?BUCKET, [<<"bob">>]) end), [put_an_object(PBPid, int_to_key(N), 1000, <<"myval">>) || N <- lists:seq(0, 100)], diff --git a/tests/verify_2i_mixed_cluster.erl b/tests/verify_2i_mixed_cluster.erl index 95c5d597e..c8b0d1d95 100644 --- a/tests/verify_2i_mixed_cluster.erl +++ b/tests/verify_2i_mixed_cluster.erl @@ -36,8 +36,8 @@ confirm() -> OldVsn, OldVsn]), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), - PBC1 = rt:pbc(CurrentNode), - PBC2 = rt:pbc(OldNode1), + PBC1 = rt_pb:pbc(CurrentNode), + PBC2 = rt_pb:pbc(OldNode1), HTTPC1 = rt:httpc(CurrentNode), Clients = [{pb, PBC1}, {pb, PBC2}, {http, HTTPC1}], @@ -57,7 +57,7 @@ confirm() -> ToDel = [<<"obj05">>, <<"obj11">>], [?assertMatch(ok, riakc_pb_socket:delete(PBC1, ?BUCKET, KD)) || KD <- ToDel], lager:info("Make sure the tombstone is reaped..."), - ?assertMatch(ok, rt:wait_until(fun() -> rt:pbc_really_deleted(PBC1, ?BUCKET, ToDel) end)), + ?assertMatch(ok, rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBC1, ?BUCKET, ToDel) end)), assertExactQuery(Clients, [], <<"field1_bin">>, <<"val5">>), assertExactQuery(Clients, [], <<"field2_int">>, 5), diff --git a/tests/verify_2i_returnterms.erl b/tests/verify_2i_returnterms.erl index 2708a2de8..7548256e3 100644 --- a/tests/verify_2i_returnterms.erl +++ b/tests/verify_2i_returnterms.erl @@ -34,7 +34,7 @@ confirm() -> ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), + PBPid = rt_pb:pbc(hd(Nodes)), [put_an_object(PBPid, 
N) || N <- lists:seq(0, 100)], [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)], diff --git a/tests/verify_2i_stream.erl b/tests/verify_2i_stream.erl index 638d68691..8440fc30a 100644 --- a/tests/verify_2i_stream.erl +++ b/tests/verify_2i_stream.erl @@ -33,7 +33,7 @@ confirm() -> ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), - PBPid = rt:pbc(hd(Nodes)), + PBPid = rt_pb:pbc(hd(Nodes)), [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], [put_an_object(PBPid, int_to_key(N), N, ?FOO) || N <- lists:seq(101, 200)], diff --git a/tests/verify_2i_timeout.erl b/tests/verify_2i_timeout.erl index e17c09caf..6f05fa955 100644 --- a/tests/verify_2i_timeout.erl +++ b/tests/verify_2i_timeout.erl @@ -32,7 +32,7 @@ confirm() -> Nodes = rt_cluster:build_cluster([{current, Config}, {current, Config}, {current, Config}]), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - PBPid = rt:pbc(hd(Nodes)), + PBPid = rt_pb:pbc(hd(Nodes)), Http = rt:http_url(hd(Nodes)), [put_an_object(PBPid, N) || N <- lists:seq(0, 100)], diff --git a/tests/verify_aae.erl b/tests/verify_aae.erl index 97f665d63..a40a42b8a 100644 --- a/tests/verify_aae.erl +++ b/tests/verify_aae.erl @@ -144,7 +144,7 @@ write_data(Node, KVs) -> write_data(Node, KVs, []). write_data(Node, KVs, Opts) -> - PB = rt:pbc(Node), + PB = rt_pb:pbc(Node), [begin O = case riakc_pb_socket:get(PB, ?BUCKET, K) of @@ -161,7 +161,7 @@ write_data(Node, KVs, Opts) -> % @doc Verifies that the data is eventually restored to the expected set. 
verify_data(Node, KeyValues) -> lager:info("Verify all replicas are eventually correct"), - PB = rt:pbc(Node), + PB = rt_pb:pbc(Node), CheckFun = fun() -> Matches = [verify_replicas(Node, ?BUCKET, K, V, ?N_VAL) diff --git a/tests/verify_api_timeouts.erl b/tests/verify_api_timeouts.erl index 5cd22e08f..6335ac0ee 100644 --- a/tests/verify_api_timeouts.erl +++ b/tests/verify_api_timeouts.erl @@ -72,7 +72,7 @@ confirm() -> end, - PC = rt:pbc(Node), + PC = rt_pb:pbc(Node), lager:info("testing PBC API"), @@ -126,7 +126,7 @@ confirm() -> lager:info("Checking List timeouts"), lager:info("Checking PBC"), - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), lager:info("Checking keys timeout"), ?assertMatch({error, <<"timeout">>}, riakc_pb_socket:list_keys(Pid, ?BUCKET, Short)), @@ -228,7 +228,7 @@ wait_for_end(ReqId) -> put_buckets(Node, Num) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Buckets = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], {Key, Val} = {<<"test_key">>, <<"test_value">>}, @@ -238,7 +238,7 @@ put_buckets(Node, Num) -> put_keys(Node, Bucket, Num) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)], diff --git a/tests/verify_asis_put.erl b/tests/verify_asis_put.erl index f89bd8517..16bbc45fb 100644 --- a/tests/verify_asis_put.erl +++ b/tests/verify_asis_put.erl @@ -7,8 +7,8 @@ confirm() -> [Node1, Node2] = rt_cluster:deploy_nodes(2), %% 2. With PBC lager:info("Put new object in ~p via PBC.", [Node1]), - PB1 = rt:pbc(Node1), - PB2 = rt:pbc(Node2), + PB1 = rt_pb:pbc(Node1), + PB2 = rt_pb:pbc(Node2), Obj1 = riakc_obj:new(<<"verify_asis_put">>, <<"1">>, <<"test">>, "text/plain"), %% a. put in node 1 %% b. 
fetch from node 1 for vclock diff --git a/tests/verify_backup_restore.erl b/tests/verify_backup_restore.erl index aec9a8bb3..5a94e17ff 100644 --- a/tests/verify_backup_restore.erl +++ b/tests/verify_backup_restore.erl @@ -42,7 +42,7 @@ confirm() -> [Node0 | _RestNodes] = Nodes = rt_cluster:build_cluster(?NUM_NODES, Config), rt:enable_search_hook(Node0, ?SEARCH_BUCKET), rt:wait_until_ring_converged(Nodes), - PbcPid = rt:pbc(Node0), + PbcPid = rt_pb:pbc(Node0), Searches = [ {<<"ZiaSun">>, 1}, @@ -57,7 +57,7 @@ confirm() -> AllTerms = lists:foldl(ConcatBin, <<"">>, Searches), lager:info("Indexing data for search from ~p", [SpamDir]), - rt:pbc_put_dir(PbcPid, ?SEARCH_BUCKET, SpamDir), + rt_pb:pbc_put_dir(PbcPid, ?SEARCH_BUCKET, SpamDir), ExtraKey = <<"Extra1">>, riakc_pb_socket:put(PbcPid, riakc_obj:new(?SEARCH_BUCKET, @@ -98,7 +98,7 @@ confirm() -> {last, ?NUM_MOD+?NUM_DEL}]), lager:info("Deleting extra search doc"), riakc_pb_socket:delete(PbcPid, ?SEARCH_BUCKET, ExtraKey), - rt:wait_until(fun() -> rt:pbc_really_deleted(PbcPid, + rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PbcPid, ?SEARCH_BUCKET, [ExtraKey]) end), @@ -141,7 +141,7 @@ confirm() -> rt:enable_search_hook(Node0, ?SEARCH_BUCKET), rt:wait_until_ring_converged(Nodes), rt:wait_until_no_pending_changes(Nodes), - PbcPid2 = rt:pbc(Node0), + PbcPid2 = rt_pb:pbc(Node0), lager:info("Verify no data in cluster"), [?assertEqual([], read_some(Node, [{last, ?NUM_KEYS}, @@ -198,7 +198,7 @@ write_some(PBC, Props) -> end end, ?assertEqual([], lists:foldl(DelFun, [], Keys)), - rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, Keys1) end); + rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBC, Bucket, Keys1) end); _ -> ok end, @@ -283,7 +283,7 @@ delete_some(PBC, Props) -> end end, lists:foldl(F, [], Keys), - rt:wait_until(fun() -> rt:pbc_really_deleted(PBC, Bucket, Keys) end), + rt:wait_until(fun() -> rt_pb:pbc_really_deleted(PBC, Bucket, Keys) end), ok. 
verify_search_count(Pid, SearchQuery, Count) -> diff --git a/tests/verify_bitcask_tombstone2_upgrade.erl b/tests/verify_bitcask_tombstone2_upgrade.erl index b6ebcf695..5a999bcc4 100644 --- a/tests/verify_bitcask_tombstone2_upgrade.erl +++ b/tests/verify_bitcask_tombstone2_upgrade.erl @@ -36,7 +36,7 @@ verify_bitcask_tombstone2_upgrade(Nodes) -> lager:info("And that is that"). write_some_data([Node1 | _]) -> - rt:pbc_systest_write(Node1, 10000). + rt_pb:pbc_systest_write(Node1, 10000). list_bitcask_files(Nodes) -> [{Node, list_node_bitcask_files(Node)} || Node <- Nodes]. diff --git a/tests/verify_conditional_postcommit.erl b/tests/verify_conditional_postcommit.erl index cbf79b705..24112468f 100644 --- a/tests/verify_conditional_postcommit.erl +++ b/tests/verify_conditional_postcommit.erl @@ -39,7 +39,7 @@ confirm() -> Bucket1 = {<<"type1">>, <<"test">>}, Bucket2 = {<<"type2">>, <<"test">>}, Keys = [<> || N <- lists:seq(1,1000)], - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("Writing keys as 'type1' and verifying hook is not triggered"), write_keys(Node, PBC, Bucket1, Keys, false), @@ -55,7 +55,7 @@ confirm() -> write_keys(Node, PBC, Bucket, Keys, ShouldHook) -> rpc:call(Node, application, set_env, [riak_kv, hook_count, 0]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key) || Key <- Keys], {ok, Count} = rpc:call(Node, application, get_env, [riak_kv, hook_count]), case ShouldHook of true -> diff --git a/tests/verify_corruption_filtering.erl b/tests/verify_corruption_filtering.erl index 7154786d4..df899458f 100644 --- a/tests/verify_corruption_filtering.erl +++ b/tests/verify_corruption_filtering.erl @@ -65,7 +65,7 @@ confirm() -> pass. get_put_mix(Node) -> - PB = rt:pbc(Node), + PB = rt_pb:pbc(Node), [begin Key = random:uniform(1000), case random:uniform(2) of @@ -90,7 +90,7 @@ get_put_mix(Node) -> || _ <- lists:seq(1, 2000)]. 
load_cluster(Node) -> - PB = rt:pbc(Node), + PB = rt_pb:pbc(Node), [riakc_pb_socket:put(PB, riakc_obj:new(<<"foo">>, <>, <>)) diff --git a/tests/verify_counter_capability.erl b/tests/verify_counter_capability.erl index 5f8670a45..e8d3aa747 100644 --- a/tests/verify_counter_capability.erl +++ b/tests/verify_counter_capability.erl @@ -61,7 +61,7 @@ confirm() -> rt:upgrade(Legacy, previous), - PrevPB2 = rt:pbc(Legacy), + PrevPB2 = rt_pb:pbc(Legacy), ?assertEqual(ok, rt:wait_until_capability(Previous, {riak_kv, crdt}, [pncounter])), @@ -81,4 +81,4 @@ confirm() -> pass. get_clients(Node) -> - {rt:pbc(Node), rt:httpc(Node)}. + {rt_pb:pbc(Node), rt:httpc(Node)}. diff --git a/tests/verify_crdt_capability.erl b/tests/verify_crdt_capability.erl index 05204ae01..cf6710904 100644 --- a/tests/verify_crdt_capability.erl +++ b/tests/verify_crdt_capability.erl @@ -93,4 +93,4 @@ gen_counter_op() -> riakc_counter:to_op(riakc_counter:increment(riakc_counter:new())). get_clients(Node) -> - {rt:pbc(Node), rt:httpc(Node)}. + {rt_pb:pbc(Node), rt:httpc(Node)}. diff --git a/tests/verify_cs_bucket.erl b/tests/verify_cs_bucket.erl index 3ba21c902..4b517b043 100644 --- a/tests/verify_cs_bucket.erl +++ b/tests/verify_cs_bucket.erl @@ -32,7 +32,7 @@ confirm() -> Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - PBPid = rt:pbc(hd(Nodes)), + PBPid = rt_pb:pbc(hd(Nodes)), [put_an_object(PBPid, N) || N <- lists:seq(0, 200)], diff --git a/tests/verify_dt_context.erl b/tests/verify_dt_context.erl index a9e44d12f..af81a6c67 100644 --- a/tests/verify_dt_context.erl +++ b/tests/verify_dt_context.erl @@ -192,7 +192,7 @@ store_map(Client, Map) -> create_pb_clients(Nodes) -> [begin - C = rt:pbc(N), + C = rt_pb:pbc(N), riakc_pb_socket:set_options(C, [queue_if_disconnected]), C end || N <- Nodes]. 
diff --git a/tests/verify_dt_converge.erl b/tests/verify_dt_converge.erl index 5abcbcc5a..a963ee2e9 100644 --- a/tests/verify_dt_converge.erl +++ b/tests/verify_dt_converge.erl @@ -124,7 +124,7 @@ confirm() -> create_pb_clients(Nodes) -> [begin - C = rt:pbc(N), + C = rt_pb:pbc(N), riakc_pb_socket:set_options(C, [queue_if_disconnected]), C end || N <- Nodes]. diff --git a/tests/verify_dt_upgrade.erl b/tests/verify_dt_upgrade.erl index 21690c2d6..c7562edfb 100644 --- a/tests/verify_dt_upgrade.erl +++ b/tests/verify_dt_upgrade.erl @@ -56,7 +56,7 @@ populate_counters(Node) -> ?assertMatch(ok, rhc:counter_incr(RHC, ?COUNTER_BUCKET, <<"httpkey">>, 2)), ?assertMatch({ok, 2}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"httpkey">>)), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), ?assertEqual(ok, riakc_pb_socket:counter_incr(PBC, ?COUNTER_BUCKET, <<"pbkey">>, 4)), ?assertEqual({ok, 4}, riakc_pb_socket:counter_val(PBC, ?COUNTER_BUCKET, <<"pbkey">>)), ok. @@ -68,7 +68,7 @@ verify_counters(Node) -> RHC = rt:httpc(Node), ?assertMatch({ok, 4}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"pbkey">>)), - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), ?assertEqual({ok, 2}, riakc_pb_socket:counter_val(PBC, ?COUNTER_BUCKET, <<"httpkey">>)), %% Check that 1.4 counters work with bucket types diff --git a/tests/verify_dvv_repl.erl b/tests/verify_dvv_repl.erl index 2f7f776ad..4e6e18281 100644 --- a/tests/verify_dvv_repl.erl +++ b/tests/verify_dvv_repl.erl @@ -71,7 +71,7 @@ make_cluster(Nodes, Name) -> repl_util:make_cluster(Nodes), repl_util:name_cluster(hd(Nodes), Name), repl_util:wait_until_leader_converge(Nodes), - C = rt:pbc(hd(Nodes)), + C = rt_pb:pbc(hd(Nodes)), riakc_pb_socket:set_options(C, [queue_if_disconnected]), {C, Nodes}. 
diff --git a/tests/verify_handoff_mixed.erl b/tests/verify_handoff_mixed.erl index 0370a49d0..0938a122e 100644 --- a/tests/verify_handoff_mixed.erl +++ b/tests/verify_handoff_mixed.erl @@ -101,7 +101,7 @@ prepare_vnodes(Node) -> prepare_kv_vnodes(Node) -> lager:info("Preparing KV vnodes with keys 1-~b in bucket ~s", [?KV_COUNT, ?KV_BUCKET]), - C = rt:pbc(Node), + C = rt_pb:pbc(Node), lists:foreach( fun(KV) -> ok = riakc_pb_socket:put(C, riakc_obj:new(?KV_BUCKET, KV, KV)) @@ -113,7 +113,7 @@ prepare_search_vnodes(Node) -> lager:info("Peparing Search vnodes with keys 1000-~b in bucket ~s", [1000+?SEARCH_COUNT, ?SEARCH_BUCKET]), rt:enable_search_hook(Node, ?SEARCH_BUCKET), - C = rt:pbc(Node), + C = rt_pb:pbc(Node), lists:foreach( fun(KV) -> O = riakc_obj:new(?SEARCH_BUCKET, KV, KV, "text/plain"), diff --git a/tests/verify_kv_health_check.erl b/tests/verify_kv_health_check.erl index abde0d7aa..861157bfd 100644 --- a/tests/verify_kv_health_check.erl +++ b/tests/verify_kv_health_check.erl @@ -43,7 +43,7 @@ confirm() -> %% make DisableThreshold+5 requests and trigger the health check explicitly %% we only need to backup one vnode's msg queue on the node to fail the health check %% so we read the same key again and again - C = rt:pbc(Node2), + C = rt_pb:pbc(Node2), [riakc_pb_socket:get(C, <<"b">>, <<"k">>) || _ <- lists:seq(1,DisableThreshold+5)], ok = rpc:call(Node1, riak_core_node_watcher, check_health, [riak_kv]), diff --git a/tests/verify_link_walk_urls.erl b/tests/verify_link_walk_urls.erl index ba174b875..1606f1324 100644 --- a/tests/verify_link_walk_urls.erl +++ b/tests/verify_link_walk_urls.erl @@ -34,7 +34,7 @@ confirm() -> [Node0 | _] = rt_cluster:build_cluster(?NUM_NODES), - Pbc = rt:pbc(Node0), + Pbc = rt_pb:pbc(Node0), lager:info("Inserting linked graph"), %% (deleted) (b/4,v4b) <-> (b/5,v5b) diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index 087d1b8dd..ab7fd56bc 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ 
-91,7 +91,7 @@ confirm() -> pass. put_keys(Node, Bucket, Num) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], [riakc_pb_socket:put(Pid, riakc_obj:new(Bucket, Key, Val)) || {Key, Val} <- lists:zip(Keys, Vals)], @@ -100,7 +100,7 @@ put_keys(Node, Bucket, Num) -> list_keys(Node, Interface, Bucket, Attempt, Num, ShouldPass) -> case Interface of pbc -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> Pid = rt:httpc(Node), @@ -128,7 +128,7 @@ list_keys(Node, Interface, Bucket, Attempt, Num, ShouldPass) -> list_keys_for_undefined_bucket_type(Node, Interface, Bucket, Attempt, ShouldPass) -> case Interface of pbc -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> Pid = rt:httpc(Node), @@ -151,7 +151,7 @@ list_keys_for_undefined_bucket_type(Node, Interface, Bucket, Attempt, ShouldPass end. 
put_buckets(Node, Num) -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Buckets = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], {Key, Val} = {<<"test_key">>, <<"test_value">>}, @@ -162,7 +162,7 @@ put_buckets(Node, Num) -> list_buckets(Node, Interface, Attempt, Num, ShouldPass) -> case Interface of pbc -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> Pid = rt:httpc(Node), @@ -196,7 +196,7 @@ list_buckets(Node, Interface, Attempt, Num, ShouldPass) -> list_buckets_for_undefined_bucket_type(Node, Interface, Attempt, ShouldPass) -> case Interface of pbc -> - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> Pid = rt:httpc(Node), diff --git a/tests/verify_listkeys_eqcfsm.erl b/tests/verify_listkeys_eqcfsm.erl index bb751fd6b..f42206da4 100644 --- a/tests/verify_listkeys_eqcfsm.erl +++ b/tests/verify_listkeys_eqcfsm.erl @@ -229,7 +229,7 @@ node_list(NumNodes) -> put_keys(Node, Bucket, Num) -> lager:info("*******************[CMD] Putting ~p keys into bucket ~p on node ~p", [Num, Bucket, Node]), - Pid = rt:pbc(Node), + Pid = rt_pb:pbc(Node), try Keys = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], Vals = [list_to_binary(["", integer_to_list(Ki)]) || Ki <- lists:seq(0, Num - 1)], diff --git a/tests/verify_mr_prereduce_node_down.erl b/tests/verify_mr_prereduce_node_down.erl index e6bc2d914..62dd2f496 100644 --- a/tests/verify_mr_prereduce_node_down.erl +++ b/tests/verify_mr_prereduce_node_down.erl @@ -56,7 +56,7 @@ confirm() -> [] = rt:systest_write(Primary, 1, ObjCount, Bucket, 3), %% run the query a bunch - C = rt:pbc(Primary), + C = rt_pb:pbc(Primary), TestCount = 100, lager:info("Running the MR query ~b times", [TestCount]), Runs = [ run_query(C, Bucket) || _ <- lists:seq(1, TestCount) ], diff --git a/tests/verify_no_writes_on_read.erl b/tests/verify_no_writes_on_read.erl index 5b48ed048..65902cf54 100644 --- a/tests/verify_no_writes_on_read.erl +++ 
b/tests/verify_no_writes_on_read.erl @@ -12,7 +12,7 @@ confirm() -> lager:info("Running with backend ~p", [Backend]), ?assertEqual(bitcask, Backend), [Node1 | _Rest] = _Nodes = rt_cluster:build_cluster(?NUM_NODES), - PBC = rt:pbc(Node1), + PBC = rt_pb:pbc(Node1), lager:info("Setting last write wins on bucket"), B = ?BUCKET, ?assertMatch(ok, rpc:call(Node1, riak_core_bucket, set_bucket, [B, [{last_write_wins, true}]])), diff --git a/tests/verify_object_limits.erl b/tests/verify_object_limits.erl index a989eed1d..674ab8b96 100644 --- a/tests/verify_object_limits.erl +++ b/tests/verify_object_limits.erl @@ -40,7 +40,7 @@ confirm() -> {warn_object_size, ?WARN_SIZE}, {max_siblings, ?MAX_SIBLINGS}, {warn_siblings, ?WARN_SIBLINGS}]}]), - C = rt:pbc(Node1), + C = rt_pb:pbc(Node1), %% Set up to grep logs to verify messages rt:setup_log_capture(Node1), diff --git a/tests/verify_reset_bucket_props.erl b/tests/verify_reset_bucket_props.erl index c3740bc94..175ebcbdc 100644 --- a/tests/verify_reset_bucket_props.erl +++ b/tests/verify_reset_bucket_props.erl @@ -43,7 +43,7 @@ confirm() -> update_props(DefaultProps, Node1, Nodes), - C = rt:pbc(Node3), + C = rt_pb:pbc(Node3), lager:info("Resetting bucket properties for bucket ~p on node ~p via pbc", [?BUCKET, Node3]), ok = riakc_pb_socket:reset_bucket(C, ?BUCKET), diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index 87e7d224c..5cbd461bd 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -81,7 +81,7 @@ confirm() -> lager:info("Make PBC Connection"), - Pid = rt:pbc(Node1), + Pid = rt_pb:pbc(Node1), Stats3 = get_stats(Node1), @@ -94,14 +94,14 @@ confirm() -> lager:info("Force Read Repair"), - rt:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>), + rt_pb:pbc_write(Pid, <<"testbucket">>, <<"1">>, <<"blah!">>), rt:pbc_set_bucket_prop(Pid, <<"testbucket">>, [{n_val, 4}]), Stats4 = get_stats(Node1), verify_inc(Stats3, Stats4, [{<<"read_repairs_total">>, 0}, {<<"read_repairs">>, 0}]), 
- _Value = rt:pbc_read(Pid, <<"testbucket">>, <<"1">>), + _Value = rt_pb:pbc_read(Pid, <<"testbucket">>, <<"1">>), Stats5 = get_stats(Node1), diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index a1455deb1..9235941c3 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -37,7 +37,7 @@ confirm() -> %% write key with index that old version of sext would encode improperly (not perserving %% sort order) lager:info("writing test key"), - Client0 = rt:pbc(Node), + Client0 = rt_pb:pbc(Node), Obj0 = riakc_obj:new(TestBucket, TestKey, <<"somevalue">>), ObjMD0 = riakc_obj:get_update_metadata(Obj0), ObjMD1 = riakc_obj:set_secondary_index(ObjMD0, @@ -57,7 +57,7 @@ confirm() -> %% should rewrite 1 index (* n = 3), ignore 0 and have zero errors {3, 0, 0} = rpc:call(Node, riak_kv_util, fix_incorrect_index_entries, []), - Client1 = rt:pbc(Node), + Client1 = rt_pb:pbc(Node), Results = riakc_pb_socket:get_index(Client1, TestBucket, TestIndex, 1000000000000, TestIdxValue), diff --git a/tests/verify_vclock.erl b/tests/verify_vclock.erl index 5a5cbaa50..cf7c89268 100644 --- a/tests/verify_vclock.erl +++ b/tests/verify_vclock.erl @@ -134,10 +134,10 @@ our_pbc_write(Node, Size, Suffix) -> our_pbc_write(Node, 1, Size, <<"systest">>, Suffix). our_pbc_write(Node, Start, End, Bucket, VSuffix) -> - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), F = fun(N, Acc) -> {K, V} = make_kv(N, VSuffix), - try rt:pbc_write(PBC, Bucket, K, V) of + try rt_pb:pbc_write(PBC, Bucket, K, V) of ok -> Acc; Other -> @@ -156,7 +156,7 @@ our_pbc_read(Node, Size, Suffix) -> our_pbc_read(Node, 1, Size, <<"systest">>, Suffix). 
our_pbc_read(Node, Start, End, Bucket, VSuffix) -> - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), %% Trundle along through the list, collecting mismatches: F = fun(N, Acc) -> diff --git a/tests/verify_vclock_encoding_upgrade.erl b/tests/verify_vclock_encoding_upgrade.erl index 110951099..cd482ee76 100644 --- a/tests/verify_vclock_encoding_upgrade.erl +++ b/tests/verify_vclock_encoding_upgrade.erl @@ -24,8 +24,8 @@ confirm() -> lager:info("Deploying previous cluster"), [Prev, Current] = rt_cluster:build_cluster([previous, current]), - PrevClient = rt:pbc(Prev), - CurrentClient = rt:pbc(Current), + PrevClient = rt_pb:pbc(Prev), + CurrentClient = rt_pb:pbc(Current), K = <<"key">>, B = <<"bucket">>, V = <<"value">>, diff --git a/tests/yz_ensemble.erl b/tests/yz_ensemble.erl index 320b3949a..318fa6ae6 100644 --- a/tests/yz_ensemble.erl +++ b/tests/yz_ensemble.erl @@ -47,10 +47,10 @@ verify_ensemble_delete_support(Node, Bucket, Index) -> Keys = [<> || N <- lists:seq(1,2000), not lists:any(fun(E) -> E > 127 end,binary_to_list(<>))], - PBC = rt:pbc(Node), + PBC = rt_pb:pbc(Node), lager:info("Writing ~p keys", [length(Keys)]), - [ok = rt:pbc_write(PBC, Bucket, Key, Key, "text/plain") || Key <- Keys], + [ok = rt_pb:pbc_write(PBC, Bucket, Key, Key, "text/plain") || Key <- Keys], %% soft commit wait, then check that last key is indexed lager:info("Search for keys to verify they exist"), From bc8e818476af637d68f024dfb622c0dcea1b56c3 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 14:09:50 -0400 Subject: [PATCH 06/17] Fix rt_pb function exports. --- src/rt.erl | 16 +--------------- src/rt_pb.erl | 26 ++++++++++++++++++++++++-- 2 files changed, 25 insertions(+), 17 deletions(-) diff --git a/src/rt.erl b/src/rt.erl index 7df35c6d9..93bb48d87 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -189,7 +189,7 @@ rpc_get_env(Node, [{App,Var}|Others]) -> -spec connection_info(node() | [node()]) -> interfaces() | conn_info(). 
connection_info(Node) when is_atom(Node) -> - {ok, [{PB_IP, PB_Port}]} = get_pb_conn_info(Node), + {ok, [{PB_IP, PB_Port}]} = rt_pb:get_pb_conn_info(Node), {ok, [{HTTP_IP, HTTP_Port}]} = get_http_conn_info(Node), case get_https_conn_info(Node) of undefined -> @@ -200,20 +200,6 @@ connection_info(Node) when is_atom(Node) -> connection_info(Nodes) when is_list(Nodes) -> [ {Node, connection_info(Node)} || Node <- Nodes]. --spec get_pb_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. -get_pb_conn_info(Node) -> - case rpc_get_env(Node, [{riak_api, pb}, - {riak_api, pb_ip}, - {riak_kv, pb_ip}]) of - {ok, [{NewIP, NewPort}|_]} -> - {ok, [{NewIP, NewPort}]}; - {ok, PB_IP} -> - {ok, PB_Port} = rpc_get_env(Node, [{riak_api, pb_port}, - {riak_kv, pb_port}]), - {ok, [{PB_IP, PB_Port}]}; - _ -> - undefined - end. -spec get_http_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. get_http_conn_info(Node) -> diff --git a/src/rt_pb.erl b/src/rt_pb.erl index 959f7a815..809e8b717 100644 --- a/src/rt_pb.erl +++ b/src/rt_pb.erl @@ -16,7 +16,6 @@ -include("rt.hrl"). -include_lib("eunit/include/eunit.hrl"). --compile(export_all). -export([pbc/1, pbc_read/3, pbc_read/4, @@ -24,9 +23,17 @@ pbc_read_check/5, pbc_set_bucket_prop/3, pbc_write/4, + pbc_write/5, pbc_put_dir/3, pbc_put_file/4, - pbc_really_deleted/3]). + pbc_really_deleted/3, + pbc_systest_write/2, + pbc_systest_write/3, + pbc_systest_write/5, + pbc_systest_read/2, + pbc_systest_read/3, + pbc_systest_read/5, + get_pb_conn_info/1]). -define(HARNESS, (rt_config:get(rt_harness))). @@ -162,3 +169,18 @@ pbc_systest_read(Node, Start, End, Bucket, R) -> end end, lists:foldl(F, [], lists:seq(Start, End)). + +-spec get_pb_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. 
+get_pb_conn_info(Node) -> + case rt:rpc_get_env(Node, [{riak_api, pb}, + {riak_api, pb_ip}, + {riak_kv, pb_ip}]) of + {ok, [{NewIP, NewPort}|_]} -> + {ok, [{NewIP, NewPort}]}; + {ok, PB_IP} -> + {ok, PB_Port} = rt:rpc_get_env(Node, [{riak_api, pb_port}, + {riak_kv, pb_port}]), + {ok, [{PB_IP, PB_Port}]}; + _ -> + undefined + end. From 311719e73f1dbc38007b72a7283f643eec78206d Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 14:36:03 -0400 Subject: [PATCH 07/17] Refactor http-relate rt functions - Move rt module functions to new rt_http module. - Convert http_bucket_types to new test convention. --- src/rt.erl | 66 +---------------------- src/rt_http.erl | 82 +++++++++++++++++++++++++++++ src/rt_pb.erl | 5 ++ tests/bucket_props_roundtrip.erl | 2 +- tests/http_bucket_types.erl | 25 +++++---- tests/jmx_verify.erl | 6 +-- tests/mapred_search_switch.erl | 4 +- tests/replication/replication2.erl | 2 +- tests/secondary_index_tests.erl | 2 +- tests/verify_2i_limit.erl | 2 +- tests/verify_2i_mixed_cluster.erl | 2 +- tests/verify_api_timeouts.erl | 8 +-- tests/verify_asis_put.erl | 4 +- tests/verify_commit_hooks.erl | 18 +++---- tests/verify_counter_capability.erl | 2 +- tests/verify_counter_converge.erl | 2 +- tests/verify_counter_repl.erl | 2 +- tests/verify_crdt_capability.erl | 2 +- tests/verify_dt_converge.erl | 2 +- tests/verify_dt_upgrade.erl | 4 +- tests/verify_listkeys.erl | 8 +-- tests/verify_riak_stats.erl | 6 +-- 22 files changed, 144 insertions(+), 112 deletions(-) create mode 100644 src/rt_http.erl diff --git a/src/rt.erl b/src/rt.erl index 93bb48d87..3aed6408c 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -55,11 +55,6 @@ get_ring/1, get_version/0, heal/1, - http_url/1, - https_url/1, - httpc/1, - httpc_read/3, - httpc_write/4, is_mixed_cluster/1, is_pingable/1, join/2, @@ -190,8 +185,8 @@ rpc_get_env(Node, [{App,Var}|Others]) -> -spec connection_info(node() | [node()]) -> interfaces() | conn_info(). 
connection_info(Node) when is_atom(Node) -> {ok, [{PB_IP, PB_Port}]} = rt_pb:get_pb_conn_info(Node), - {ok, [{HTTP_IP, HTTP_Port}]} = get_http_conn_info(Node), - case get_https_conn_info(Node) of + {ok, [{HTTP_IP, HTTP_Port}]} = rt_http:get_http_conn_info(Node), + case rt_http:get_https_conn_info(Node) of undefined -> [{http, {HTTP_IP, HTTP_Port}}, {pb, {PB_IP, PB_Port}}]; {ok, [{HTTPS_IP, HTTPS_Port}]} -> @@ -201,27 +196,6 @@ connection_info(Nodes) when is_list(Nodes) -> [ {Node, connection_info(Node)} || Node <- Nodes]. --spec get_http_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. -get_http_conn_info(Node) -> - case rpc_get_env(Node, [{riak_api, http}, - {riak_core, http}]) of - {ok, [{IP, Port}|_]} -> - {ok, [{IP, Port}]}; - _ -> - undefined - end. - --spec get_https_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. -get_https_conn_info(Node) -> - case rpc_get_env(Node, [{riak_api, https}, - {riak_core, https}]) of - {ok, [{IP, Port}|_]} -> - {ok, [{IP, Port}]}; - _ -> - undefined - end. - - %% @doc Start the specified Riak node start(Node) -> ?HARNESS:start(Node). @@ -1101,42 +1075,6 @@ get_replica(Node, Bucket, Key, I, N) -> %%%=================================================================== -%% @doc Returns HTTPS URL information for a list of Nodes -https_url(Nodes) when is_list(Nodes) -> - [begin - {Host, Port} = orddict:fetch(https, Connections), - lists:flatten(io_lib:format("https://~s:~b", [Host, Port])) - end || {_Node, Connections} <- connection_info(Nodes)]; -https_url(Node) -> - hd(https_url([Node])). - -%% @doc Returns HTTP URL information for a list of Nodes -http_url(Nodes) when is_list(Nodes) -> - [begin - {Host, Port} = orddict:fetch(http, Connections), - lists:flatten(io_lib:format("http://~s:~b", [Host, Port])) - end || {_Node, Connections} <- connection_info(Nodes)]; -http_url(Node) -> - hd(http_url([Node])). - -%% @doc get me an http client. --spec httpc(node()) -> term(). 
-httpc(Node) -> - rt:wait_for_service(Node, riak_kv), - {ok, [{IP, Port}]} = get_http_conn_info(Node), - rhc:create(IP, Port, "riak", []). - -%% @doc does a read via the http erlang client. --spec httpc_read(term(), binary(), binary()) -> binary(). -httpc_read(C, Bucket, Key) -> - {_, Value} = rhc:get(C, Bucket, Key), - Value. - -%% @doc does a write via the http erlang client. --spec httpc_write(term(), binary(), binary(), binary()) -> atom(). -httpc_write(C, Bucket, Key, Value) -> - Object = riakc_obj:new(Bucket, Key, Value), - rhc:put(C, Object). %%%=================================================================== %%% Command Line Functions diff --git a/src/rt_http.erl b/src/rt_http.erl new file mode 100644 index 000000000..1941e38aa --- /dev/null +++ b/src/rt_http.erl @@ -0,0 +1,82 @@ +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_http). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-export([http_url/1, + https_url/1, + httpc/1, + httpc_read/3, + httpc_write/4, + get_http_conn_info/1, + get_https_conn_info/1]). + +%% @doc Returns HTTPS URL information for a list of Nodes +https_url(Nodes) when is_list(Nodes) -> + [begin + {Host, Port} = orddict:fetch(https, Connections), + lists:flatten(io_lib:format("https://~s:~b", [Host, Port])) + end || {_Node, Connections} <- rt:connection_info(Nodes)]; +https_url(Node) -> + hd(https_url([Node])). 
+ +%% @doc Returns HTTP URL information for a list of Nodes +http_url(Nodes) when is_list(Nodes) -> + [begin + {Host, Port} = orddict:fetch(http, Connections), + lists:flatten(io_lib:format("http://~s:~b", [Host, Port])) + end || {_Node, Connections} <- rt:connection_info(Nodes)]; +http_url(Node) -> + hd(http_url([Node])). + +%% @doc get me an http client. +-spec httpc(node()) -> term(). +httpc(Node) -> + rt:wait_for_service(Node, riak_kv), + {ok, [{IP, Port}]} = get_http_conn_info(Node), + rhc:create(IP, Port, "riak", []). + +%% @doc does a read via the http erlang client. +-spec httpc_read(term(), binary(), binary()) -> binary(). +httpc_read(C, Bucket, Key) -> + {_, Value} = rhc:get(C, Bucket, Key), + Value. + +%% @doc does a write via the http erlang client. +-spec httpc_write(term(), binary(), binary(), binary()) -> atom(). +httpc_write(C, Bucket, Key, Value) -> + Object = riakc_obj:new(Bucket, Key, Value), + rhc:put(C, Object). + +-spec get_http_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. +get_http_conn_info(Node) -> + case rt:rpc_get_env(Node, [{riak_api, http}, + {riak_core, http}]) of + {ok, [{IP, Port}|_]} -> + {ok, [{IP, Port}]}; + _ -> + undefined + end. + + +-spec get_https_conn_info(node()) -> [{inet:ip_address(), pos_integer()}]. +get_https_conn_info(Node) -> + case rt:rpc_get_env(Node, [{riak_api, https}, + {riak_core, https}]) of + {ok, [{IP, Port}|_]} -> + {ok, [{IP, Port}]}; + _ -> + undefined + end. diff --git a/src/rt_pb.erl b/src/rt_pb.erl index 809e8b717..157261758 100644 --- a/src/rt_pb.erl +++ b/src/rt_pb.erl @@ -1,3 +1,8 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2013-2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, %% Version 2.0 (the "License"); you may not use this file %% except in compliance with the License. 
You may obtain %% a copy of the License at diff --git a/tests/bucket_props_roundtrip.erl b/tests/bucket_props_roundtrip.erl index 7b06fd38b..90bd2fbd4 100644 --- a/tests/bucket_props_roundtrip.erl +++ b/tests/bucket_props_roundtrip.erl @@ -69,7 +69,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> check_prop_set_and_get(Node, Prop, One, Two) -> lager:info("-------- Testing roundtrip for property '~p' ---------", [Prop]), - HTTP = rt:httpc(Node), + HTTP = rt_http:httpc(Node), PBC = rt_pb:pbc(Node), lager:info("HTTP set = ~p", [One]), http_set_property(HTTP, Prop, One), diff --git a/tests/http_bucket_types.erl b/tests/http_bucket_types.erl index 74d348cb2..e379246e4 100644 --- a/tests/http_bucket_types.erl +++ b/tests/http_bucket_types.erl @@ -1,18 +1,25 @@ -module(http_bucket_types). --behavior(riak_test). --export([confirm/0, mapred_modfun/3, mapred_modfun_type/3]). +-export([properties/0, confirm/2, mapred_modfun/3, mapred_modfun_type/3]). -include_lib("eunit/include/eunit.hrl"). -include_lib("riakc/include/riakc.hrl"). +-include("rt.hrl"). + +properties() -> + DefaultProps = rt_cluster:properties(), + CustomConfig = rt_cluster:augment_config(riak_core, + {default_bucket_props, [{n_val, 2}]}, + DefaultProps#rt_properties.config), + DefaultProps#rt_properties{node_count=1, + rolling_upgrade=false, + make_cluster=true, + config=CustomConfig}. 
+ +confirm(#rt_properties{nodes=Nodes}, _MD) -> + Node = hd(Nodes), -confirm() -> application:start(ibrowse), - lager:info("Deploy some nodes"), - Nodes = rt_cluster:build_cluster(4, [], [ - {riak_core, [{default_bucket_props, - [{n_val, 2}]}]}]), - Node = hd(Nodes), RMD = riak_test_runner:metadata(), HaveIndexes = case proplists:get_value(backend, RMD) of @@ -21,7 +28,7 @@ confirm() -> _ -> true end, - RHC = rt:httpc(Node), + RHC = rt_http:httpc(Node), lager:info("default type get/put test"), %% write explicitly to the default type ok = rhc:put(RHC, riakc_obj:new({<<"default">>, <<"bucket">>}, diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index 8966143d1..de893edcb 100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -51,9 +51,9 @@ confirm() -> lager:info("perform 5 x PUT and a GET to increment the stats"), lager:info("as the stat system only does calcs for > 5 readings"), - C = rt:httpc(Node1), - [rt:httpc_write(C, <<"systest">>, <>, <<"12345">>) || X <- lists:seq(1, 5)], - [rt:httpc_read(C, <<"systest">>, <>) || X <- lists:seq(1, 5)], + C = rt_http:httpc(Node1), + [rt_http:httpc_write(C, <<"systest">>, <>, <<"12345">>) || X <- lists:seq(1, 5)], + [rt_http:httpc_read(C, <<"systest">>, <>) || X <- lists:seq(1, 5)], JMX2 = jmx_dump(JMXDumpCmd), %% make sure the stats that were supposed to increment did diff --git a/tests/mapred_search_switch.erl b/tests/mapred_search_switch.erl index e34951b76..adf8f0833 100644 --- a/tests/mapred_search_switch.erl +++ b/tests/mapred_search_switch.erl @@ -162,7 +162,7 @@ generate_test_data(System) -> %% setup riak_search hook setup_rs_bucket([Node|_], Bucket) -> lager:info("Setting up riak_search hook"), - C = rt:httpc(Node), + C = rt_http:httpc(Node), ok = rhc:set_bucket(C, Bucket, [{search, true}]). 
%% setup yokozuna hook/index - bucket name == index name @@ -205,7 +205,7 @@ iburl(Node, Path) -> %% value, and each of which has a unique term in its value load_test_data([Node|_], Bucket, KeyAndUniques, Common) -> lager:info("Loading test data"), - C = rt:httpc(Node), + C = rt_http:httpc(Node), [ begin Value = list_to_binary([Common, " ", Unique]), ok = rhc:put(C, riakc_obj:new(Bucket, Key, Value, "text/plain")) diff --git a/tests/replication/replication2.erl b/tests/replication/replication2.erl index 702e59296..87cee9e05 100644 --- a/tests/replication/replication2.erl +++ b/tests/replication/replication2.erl @@ -527,7 +527,7 @@ http_write_during_shutdown(Target, BSecond, TestBucket) -> ConnInfo = proplists:get_value(Target, rt:connection_info([Target])), {IP, Port} = proplists:get_value(http, ConnInfo), lager:info("Connecting to http socket ~p:~p on ~p", [IP, Port, Target]), - C = rt:httpc(Target), + C = rt_http:httpc(Target), %% do the stop in the background while we're writing keys spawn(fun() -> diff --git a/tests/secondary_index_tests.erl b/tests/secondary_index_tests.erl index 7d66dcc52..4ad75f1c5 100644 --- a/tests/secondary_index_tests.erl +++ b/tests/secondary_index_tests.erl @@ -50,7 +50,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> Bucket = druuid:v4_str(), lager:info("Bucket: ~p", [Bucket]), PBC = rt_pb:pbc(hd(Nodes)), - HTTPC = rt:httpc(hd(Nodes)), + HTTPC = rt_http:httpc(hd(Nodes)), Clients = [{pb, PBC}, {http, HTTPC}], [put_an_object(PBC, Bucket, N) || N <- lists:seq(0, 20)], diff --git a/tests/verify_2i_limit.erl b/tests/verify_2i_limit.erl index 064feb227..6c2acdfbf 100644 --- a/tests/verify_2i_limit.erl +++ b/tests/verify_2i_limit.erl @@ -34,7 +34,7 @@ confirm() -> Nodes = rt_cluster:build_cluster(3), ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), - RiakHttp = rt:httpc(hd(Nodes)), + RiakHttp = rt_http:httpc(hd(Nodes)), HttpUrl = rt:http_url(hd(Nodes)), PBPid = rt_pb:pbc(hd(Nodes)), diff --git a/tests/verify_2i_mixed_cluster.erl 
b/tests/verify_2i_mixed_cluster.erl index c8b0d1d95..f0a7aa22a 100644 --- a/tests/verify_2i_mixed_cluster.erl +++ b/tests/verify_2i_mixed_cluster.erl @@ -38,7 +38,7 @@ confirm() -> PBC1 = rt_pb:pbc(CurrentNode), PBC2 = rt_pb:pbc(OldNode1), - HTTPC1 = rt:httpc(CurrentNode), + HTTPC1 = rt_http:httpc(CurrentNode), Clients = [{pb, PBC1}, {pb, PBC2}, {http, HTTPC1}], diff --git a/tests/verify_api_timeouts.erl b/tests/verify_api_timeouts.erl index 6335ac0ee..763e3f8bd 100644 --- a/tests/verify_api_timeouts.erl +++ b/tests/verify_api_timeouts.erl @@ -12,10 +12,10 @@ confirm() -> [Node] = rt_cluster:build_cluster(1), rt:wait_until_pingable(Node), - HC = rt:httpc(Node), + HC = rt_http:httpc(Node), lager:info("setting up initial data and loading remote code"), - rt:httpc_write(HC, <<"foo">>, <<"bar">>, <<"foobarbaz\n">>), - rt:httpc_write(HC, <<"foo">>, <<"bar2">>, <<"foobarbaz2\n">>), + rt_http:httpc_write(HC, <<"foo">>, <<"bar">>, <<"foobarbaz\n">>), + rt_http:httpc_write(HC, <<"foo">>, <<"bar2">>, <<"foobarbaz2\n">>), put_keys(Node, ?BUCKET, ?NUM_KEYS), put_buckets(Node, ?NUM_BUCKETS), @@ -155,7 +155,7 @@ confirm() -> lager:info("Checking HTTP"), - LHC = rt:httpc(Node), + LHC = rt_http:httpc(Node), lager:info("Checking keys timeout"), ?assertMatch({error, <<"timeout">>}, rhc:list_keys(LHC, ?BUCKET, Short)), diff --git a/tests/verify_asis_put.erl b/tests/verify_asis_put.erl index 16bbc45fb..7e5a71df5 100644 --- a/tests/verify_asis_put.erl +++ b/tests/verify_asis_put.erl @@ -23,8 +23,8 @@ confirm() -> %% 3. Repeat with HTTP, nodes reversed lager:info("Put new object in ~p via HTTP.", [Node2]), - HTTP1 = rt:httpc(Node1), - HTTP2 = rt:httpc(Node2), + HTTP1 = rt_http:httpc(Node1), + HTTP2 = rt_http:httpc(Node2), Obj2 = riakc_obj:new(<<"verify_asis_put">>, <<"2">>, <<"test">>, "text/plain"), %% a. put in node 2 %% b. 
fetch from node 2 for vclock diff --git a/tests/verify_commit_hooks.erl b/tests/verify_commit_hooks.erl index b2d57636e..8e754bb7d 100644 --- a/tests/verify_commit_hooks.erl +++ b/tests/verify_commit_hooks.erl @@ -34,36 +34,36 @@ confirm() -> ?assertEqual(ok, rpc:call(Node, hooks, set_hooks, [])), lager:info("Checking precommit atom failure reason."), - HTTP = rt:httpc(Node), + HTTP = rt_http:httpc(Node), ?assertMatch({error, {ok, "500", _, _}}, - rt:httpc_write(HTTP, <<"failatom">>, <<"key">>, <<"value">>)), + rt_http:httpc_write(HTTP, <<"failatom">>, <<"key">>, <<"value">>)), lager:info("Checking Bug 1145 - string failure reason"), ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failstr">>, <<"key">>, <<"value">>)), + rt_http:httpc_write(HTTP, <<"failstr">>, <<"key">>, <<"value">>)), lager:info("Checking Bug 1145 - binary failure reason"), ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failbin">>, <<"key">>, <<"value">>)), + rt_http:httpc_write(HTTP, <<"failbin">>, <<"key">>, <<"value">>)), lager:info("Checking that bucket without commit hooks passes."), - ?assertEqual(ok, rt:httpc_write(HTTP, <<"fail">>, <<"key">>, <<"value">>)), + ?assertEqual(ok, rt_http:httpc_write(HTTP, <<"fail">>, <<"key">>, <<"value">>)), lager:info("Checking that bucket with passing precommit passes."), - ?assertEqual(ok, rt:httpc_write(HTTP, <<"failkey">>, <<"key">>, <<"value">>)), + ?assertEqual(ok, rt_http:httpc_write(HTTP, <<"failkey">>, <<"key">>, <<"value">>)), lager:info("Checking that bucket with failing precommit fails."), ?assertMatch({error, {ok, "403", _, _}}, - rt:httpc_write(HTTP, <<"failkey">>, <<"fail">>, <<"value">>)), + rt_http:httpc_write(HTTP, <<"failkey">>, <<"fail">>, <<"value">>)), lager:info("Checking fix for BZ1244 - riak_kv_wm_object makes call to riak_client:get/3 with invalid type for key"), %% riak_kv_wm_object:ensure_doc will return {error, not_found}, leading to 404. 
%% see https://github.com/basho/riak_kv/pull/237 for details of the fix. ?assertMatch({error, {ok, "404", _, _}}, - rt:httpc_write(HTTP, <<"bz1244bucket">>, undefined, <<"value">>)), + rt_http:httpc_write(HTTP, <<"bz1244bucket">>, undefined, <<"value">>)), lager:info("Checking that postcommit fires."), - ?assertMatch(ok, rt:httpc_write(HTTP, <<"postcommit">>, <<"key">>, <<"value">>)), + ?assertMatch(ok, rt_http:httpc_write(HTTP, <<"postcommit">>, <<"key">>, <<"value">>)), receive {wrote, _Bucket, _Key}=Msg -> diff --git a/tests/verify_counter_capability.erl b/tests/verify_counter_capability.erl index e8d3aa747..17247e7d6 100644 --- a/tests/verify_counter_capability.erl +++ b/tests/verify_counter_capability.erl @@ -81,4 +81,4 @@ confirm() -> pass. get_clients(Node) -> - {rt_pb:pbc(Node), rt:httpc(Node)}. + {rt_pb:pbc(Node), rt_http:httpc(Node)}. diff --git a/tests/verify_counter_converge.erl b/tests/verify_counter_converge.erl index adc067b40..bad3f90b3 100644 --- a/tests/verify_counter_converge.erl +++ b/tests/verify_counter_converge.erl @@ -34,7 +34,7 @@ confirm() -> Key = <<"a">>, [N1, N2, N3, N4]=Nodes = rt_cluster:build_cluster(4), - [C1, C2, C3, C4]=Clients = [ rt:httpc(N) || N <- Nodes ], + [C1, C2, C3, C4]=Clients = [ rt_http:httpc(N) || N <- Nodes ], set_allow_mult_true(Nodes), diff --git a/tests/verify_counter_repl.erl b/tests/verify_counter_repl.erl index b1a0689d2..c5068f2ad 100644 --- a/tests/verify_counter_repl.erl +++ b/tests/verify_counter_repl.erl @@ -73,7 +73,7 @@ make_cluster(Nodes, Name) -> repl_util:make_cluster(Nodes), repl_util:name_cluster(hd(Nodes), Name), repl_util:wait_until_leader_converge(Nodes), - Clients = [ rt:httpc(Node) || Node <- Nodes ], + Clients = [ rt_http:httpc(Node) || Node <- Nodes ], lists:zip(Clients, Nodes). 
increment_cluster_counter(Cluster) -> diff --git a/tests/verify_crdt_capability.erl b/tests/verify_crdt_capability.erl index cf6710904..2179237ac 100644 --- a/tests/verify_crdt_capability.erl +++ b/tests/verify_crdt_capability.erl @@ -93,4 +93,4 @@ gen_counter_op() -> riakc_counter:to_op(riakc_counter:increment(riakc_counter:new())). get_clients(Node) -> - {rt_pb:pbc(Node), rt:httpc(Node)}. + {rt_pb:pbc(Node), rt_http:httpc(Node)}. diff --git a/tests/verify_dt_converge.erl b/tests/verify_dt_converge.erl index a963ee2e9..a8d3dcf4e 100644 --- a/tests/verify_dt_converge.erl +++ b/tests/verify_dt_converge.erl @@ -130,7 +130,7 @@ create_pb_clients(Nodes) -> end || N <- Nodes]. create_http_clients(Nodes) -> - [ rt:httpc(N) || N <- Nodes ]. + [ rt_http:httpc(N) || N <- Nodes ]. create_bucket_types([N1|_]=Nodes, Types) -> lager:info("Creating bucket types with datatypes: ~p", [Types]), diff --git a/tests/verify_dt_upgrade.erl b/tests/verify_dt_upgrade.erl index c7562edfb..e2be054c3 100644 --- a/tests/verify_dt_upgrade.erl +++ b/tests/verify_dt_upgrade.erl @@ -52,7 +52,7 @@ populate_counters(Node) -> rt:wait_for_service(Node, riak_kv), ?assertEqual(ok, rt:wait_until_capability(Node, {riak_kv, crdt}, [pncounter])), - RHC = rt:httpc(Node), + RHC = rt_http:httpc(Node), ?assertMatch(ok, rhc:counter_incr(RHC, ?COUNTER_BUCKET, <<"httpkey">>, 2)), ?assertMatch({ok, 2}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"httpkey">>)), @@ -65,7 +65,7 @@ populate_counters(Node) -> %% check that you can get via default bucket verify_counters(Node) -> lager:info("Verifying counters on ~p", [Node]), - RHC = rt:httpc(Node), + RHC = rt_http:httpc(Node), ?assertMatch({ok, 4}, rhc:counter_val(RHC, ?COUNTER_BUCKET, <<"pbkey">>)), PBC = rt_pb:pbc(Node), diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index ab7fd56bc..b7cbe73f7 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ -103,7 +103,7 @@ list_keys(Node, Interface, Bucket, Attempt, Num, ShouldPass) -> Pid = 
rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> - Pid = rt:httpc(Node), + Pid = rt_http:httpc(Node), Mod = rhc end, lager:info("Listing keys on ~p using ~p. Attempt #~p", @@ -131,7 +131,7 @@ list_keys_for_undefined_bucket_type(Node, Interface, Bucket, Attempt, ShouldPass Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> - Pid = rt:httpc(Node), + Pid = rt_http:httpc(Node), Mod = rhc end, @@ -165,7 +165,7 @@ list_buckets(Node, Interface, Attempt, Num, ShouldPass) -> Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> - Pid = rt:httpc(Node), + Pid = rt_http:httpc(Node), Mod = rhc end, lager:info("Listing buckets on ~p using ~p. Attempt #~p", @@ -199,7 +199,7 @@ list_buckets_for_undefined_bucket_type(Node, Interface, Attempt, ShouldPass) -> Pid = rt_pb:pbc(Node), Mod = riakc_pb_socket; http -> - Pid = rt:httpc(Node), + Pid = rt_http:httpc(Node), Mod = rhc end, diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index 5cbd461bd..e7a136c77 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -51,9 +51,9 @@ confirm() -> lager:info("perform 5 x PUT and a GET to increment the stats"), lager:info("as the stat system only does calcs for > 5 readings"), - C = rt:httpc(Node1), - [rt:httpc_write(C, <<"systest">>, <>, <<"12345">>) || X <- lists:seq(1, 5)], - [rt:httpc_read(C, <<"systest">>, <>) || X <- lists:seq(1, 5)], + C = rt_http:httpc(Node1), + [rt_http:httpc_write(C, <<"systest">>, <>, <<"12345">>) || X <- lists:seq(1, 5)], + [rt_http:httpc_read(C, <<"systest">>, <>) || X <- lists:seq(1, 5)], Stats2 = get_stats(Node1), From ecef227a3c8316206d6b21cc27841c296b534c7c Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 15:44:37 -0400 Subject: [PATCH 08/17] Move node-related functions from rt module to rt_node. 
--- src/riak_test_group_leader.erl | 2 +- src/riak_test_runner.erl | 2 +- src/rt.erl | 146 ------------------ src/rt_cluster.erl | 2 +- src/rt_config.erl | 12 +- src/rt_local.erl | 2 +- src/rt_node.erl | 173 ++++++++++++++++++++++ tests/basic_command_line.erl | 2 +- tests/cluster_meta_basic.erl | 4 +- tests/cluster_meta_rmr.erl | 4 +- tests/ensemble_basic2.erl | 2 +- tests/ensemble_basic3.erl | 4 +- tests/ensemble_basic4.erl | 4 +- tests/ensemble_interleave.erl | 4 +- tests/ensemble_remove_node.erl | 4 +- tests/ensemble_remove_node2.erl | 4 +- tests/ensemble_sync.erl | 4 +- tests/gh_riak_core_154.erl | 10 +- tests/gh_riak_core_155.erl | 2 +- tests/jmx_verify.erl | 4 +- tests/pr_pw.erl | 4 +- tests/replication/repl_aae_fullsync.erl | 2 +- tests/replication/repl_util.erl | 2 +- tests/replication/replication.erl | 8 +- tests/replication/replication2.erl | 20 +-- tests/replication/replication2_pg.erl | 10 +- tests/riak_control_authentication.erl | 2 +- tests/rt_basic_test.erl | 2 +- tests/verify_build_cluster.erl | 14 +- tests/verify_capabilities.erl | 2 +- tests/verify_counter_converge.erl | 4 +- tests/verify_down.erl | 6 +- tests/verify_dt_context.erl | 4 +- tests/verify_dt_converge.erl | 4 +- tests/verify_listkeys.erl | 10 +- tests/verify_membackend.erl | 2 +- tests/verify_mr_prereduce_node_down.erl | 2 +- tests/verify_riak_lager.erl | 4 +- tests/verify_secondary_index_reformat.erl | 4 +- tests/verify_staged_clustering.erl | 2 +- 40 files changed, 263 insertions(+), 236 deletions(-) create mode 100644 src/rt_node.erl diff --git a/src/riak_test_group_leader.erl b/src/riak_test_group_leader.erl index 5a673ee4b..230c5e761 100644 --- a/src/riak_test_group_leader.erl +++ b/src/riak_test_group_leader.erl @@ -107,4 +107,4 @@ io_requests(_, Result) -> %% If we get multiple lines, we'll split them up for lager to maximize the prettiness. log_chars(Chars) -> - [lager:info("~s", [Line]) || Line <- string:tokens(lists:flatten(Chars), "\n")]. 
\ No newline at end of file + [lager:info("~s", [Line]) || Line <- string:tokens(lists:flatten(Chars), "\n")]. diff --git a/src/riak_test_runner.erl b/src/riak_test_runner.erl index a1a263c9c..b245d8cee 100644 --- a/src/riak_test_runner.erl +++ b/src/riak_test_runner.erl @@ -174,7 +174,7 @@ compose_confirm_fun({ConfirmMod, ConfirmFun}, ensure_all_nodes_running(Nodes) -> [begin - ok = rt:start_and_wait(Node), + ok = rt_node:start_and_wait(Node), ok = rt:wait_until_registered(Node, riak_core_ring_manager) end || Node <- Nodes]. diff --git a/src/rt.erl b/src/rt.erl index 3aed6408c..e0bfe2717 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -31,7 +31,6 @@ -export([ admin/2, assert_nodes_agree_about_ownership/1, - async_start/1, attach/2, attach_direct/2, brutal_kill/1, @@ -45,7 +44,6 @@ connection_info/1, console/2, create_and_activate_bucket_type/3, - down/2, enable_search_hook/2, expect_in_log/2, get_deps/0, @@ -54,38 +52,28 @@ get_replica/5, get_ring/1, get_version/0, - heal/1, is_mixed_cluster/1, is_pingable/1, - join/2, - leave/1, load_modules_on_nodes/2, log_to_nodes/2, log_to_nodes/3, members_according_to/1, nearest_ringsize/1, owners_according_to/1, - partition/2, partitions_for_node/1, pmap/2, post_result/2, priv_dir/0, - remove/2, riak/2, riak_repl/2, rpc_get_env/2, setup_harness/2, setup_log_capture/1, - slow_upgrade/3, stream_cmd/1, stream_cmd/2, spawn_cmd/1, spawn_cmd/2, search_cmd/2, - start/1, - start_and_wait/1, status_of_according_to/2, - stop/1, - stop_and_wait/1, str/2, systest_read/2, systest_read/3, @@ -95,8 +83,6 @@ systest_write/3, systest_write/5, systest_write/6, - upgrade/2, - upgrade/3, wait_for_cluster_service/2, wait_for_cmd/1, wait_for_service/2, @@ -195,101 +181,6 @@ connection_info(Node) when is_atom(Node) -> connection_info(Nodes) when is_list(Nodes) -> [ {Node, connection_info(Node)} || Node <- Nodes]. - -%% @doc Start the specified Riak node -start(Node) -> - ?HARNESS:start(Node). 
- -%% @doc Start the specified Riak `Node' and wait for it to be pingable -start_and_wait(Node) -> - start(Node), - ?assertEqual(ok, wait_until_pingable(Node)). - -async_start(Node) -> - spawn(fun() -> start(Node) end). - -%% @doc Stop the specified Riak `Node'. -stop(Node) -> - lager:info("Stopping riak on ~p", [Node]), - timer:sleep(10000), %% I know, I know! - ?HARNESS:stop(Node). - %%rpc:call(Node, init, stop, []). - -%% @doc Stop the specified Riak `Node' and wait until it is not pingable -stop_and_wait(Node) -> - stop(Node), - ?assertEqual(ok, wait_until_unpingable(Node)). - -%% @doc Upgrade a Riak `Node' to the specified `NewVersion'. -upgrade(Node, NewVersion) -> - ?HARNESS:upgrade(Node, NewVersion). - -%% @doc Upgrade a Riak `Node' to the specified `NewVersion' and update -%% the config based on entries in `Config'. -upgrade(Node, NewVersion, Config) -> - ?HARNESS:upgrade(Node, NewVersion, Config). - -%% @doc Upgrade a Riak node to a specific version using the alternate -%% leave/upgrade/rejoin approach -slow_upgrade(Node, NewVersion, Nodes) -> - lager:info("Perform leave/upgrade/join upgrade on ~p", [Node]), - lager:info("Leaving ~p", [Node]), - leave(Node), - ?assertEqual(ok, rt:wait_until_unpingable(Node)), - upgrade(Node, NewVersion), - lager:info("Rejoin ~p", [Node]), - join(Node, hd(Nodes -- [Node])), - lager:info("Wait until all nodes are ready and there are no pending changes"), - ?assertEqual(ok, wait_until_nodes_ready(Nodes)), - ?assertEqual(ok, wait_until_no_pending_changes(Nodes)), - ok. - -%% @doc Have `Node' send a join request to `PNode' -join(Node, PNode) -> - R = rpc:call(Node, riak_core, join, [PNode]), - lager:info("[join] ~p to (~p): ~p", [Node, PNode, R]), - ?assertEqual(ok, R), - ok. - -%% @doc Have `Node' send a join request to `PNode' -staged_join(Node, PNode) -> - R = rpc:call(Node, riak_core, staged_join, [PNode]), - lager:info("[join] ~p to (~p): ~p", [Node, PNode, R]), - ?assertEqual(ok, R), - ok. 
- -plan_and_commit(Node) -> - timer:sleep(500), - lager:info("planning and commiting cluster join"), - case rpc:call(Node, riak_core_claimant, plan, []) of - {error, ring_not_ready} -> - lager:info("plan: ring not ready"), - timer:sleep(100), - maybe_wait_for_changes(Node), - plan_and_commit(Node); - {ok, _, _} -> - do_commit(Node) - end. - -do_commit(Node) -> - case rpc:call(Node, riak_core_claimant, commit, []) of - {error, plan_changed} -> - lager:info("commit: plan changed"), - timer:sleep(100), - maybe_wait_for_changes(Node), - plan_and_commit(Node); - {error, ring_not_ready} -> - lager:info("commit: ring not ready"), - timer:sleep(100), - maybe_wait_for_changes(Node), - do_commit(Node); - {error,nothing_planned} -> - %% Assume plan actually committed somehow - ok; - ok -> - ok - end. - maybe_wait_for_changes(Node) -> Ring = get_ring(Node), Changes = riak_core_ring:pending_changes(Ring), @@ -302,43 +193,6 @@ maybe_wait_for_changes(Node) -> ok = wait_until_no_pending_changes([Node]) end. -%% @doc Have the `Node' leave the cluster -leave(Node) -> - R = rpc:call(Node, riak_core, leave, []), - lager:info("[leave] ~p: ~p", [Node, R]), - ?assertEqual(ok, R), - ok. - -%% @doc Have `Node' remove `OtherNode' from the cluster -remove(Node, OtherNode) -> - ?assertEqual(ok, - rpc:call(Node, riak_kv_console, remove, [[atom_to_list(OtherNode)]])). - -%% @doc Have `Node' mark `OtherNode' as down -down(Node, OtherNode) -> - rpc:call(Node, riak_kv_console, down, [[atom_to_list(OtherNode)]]). - -%% @doc partition the `P1' from `P2' nodes -%% note: the nodes remained connected to riak_test@local, -%% which is how `heal/1' can still work. 
-partition(P1, P2) -> - OldCookie = rpc:call(hd(P1), erlang, get_cookie, []), - NewCookie = list_to_atom(lists:reverse(atom_to_list(OldCookie))), - [true = rpc:call(N, erlang, set_cookie, [N, NewCookie]) || N <- P1], - [[true = rpc:call(N, erlang, disconnect_node, [P2N]) || N <- P1] || P2N <- P2], - wait_until_partitioned(P1, P2), - {NewCookie, OldCookie, P1, P2}. - -%% @doc heal the partition created by call to `partition/2' -%% `OldCookie' is the original shared cookie -heal({_NewCookie, OldCookie, P1, P2}) -> - Cluster = P1 ++ P2, - % set OldCookie on P1 Nodes - [true = rpc:call(N, erlang, set_cookie, [N, OldCookie]) || N <- P1], - wait_until_connected(Cluster), - {_GN, []} = rpc:sbcast(Cluster, riak_core_node_watcher, broadcast), - ok. - %% @doc Spawn `Cmd' on the machine running the test harness spawn_cmd(Cmd) -> ?HARNESS:spawn_cmd(Cmd). diff --git a/src/rt_cluster.erl b/src/rt_cluster.erl index 1507a781f..cab1cd92c 100644 --- a/src/rt_cluster.erl +++ b/src/rt_cluster.erl @@ -204,7 +204,7 @@ clean_data_dir(Nodes, SubDir) when is_list(Nodes) -> teardown() -> %% stop all connected nodes, 'cause it'll be faster that %%lager:info("RPC stopping these nodes ~p", [nodes()]), - %%[ rt:stop(Node) || Node <- nodes()], + %%[ rt_node:stop(Node) || Node <- nodes()], %% Then do the more exhaustive harness thing, in case something was up %% but not connected. ?HARNESS:teardown(). diff --git a/src/rt_config.erl b/src/rt_config.erl index 8da5a9539..28ffa6cb5 100644 --- a/src/rt_config.erl +++ b/src/rt_config.erl @@ -133,29 +133,29 @@ config_or_os_env(Config, Default) -> set_conf(all, NameValuePairs) -> ?HARNESS:set_conf(all, NameValuePairs); set_conf(Node, NameValuePairs) -> - rt:stop(Node), + rt_node:stop(Node), ?assertEqual(ok, rt:wait_until_unpingable(Node)), ?HARNESS:set_conf(Node, NameValuePairs), - rt:start(Node). + rt_node:start(Node). -spec set_advanced_conf(atom(), [{string(), string()}]) -> ok. 
set_advanced_conf(all, NameValuePairs) -> ?HARNESS:set_advanced_conf(all, NameValuePairs); set_advanced_conf(Node, NameValuePairs) -> - rt:stop(Node), + rt_node:stop(Node), ?assertEqual(ok, rt:wait_until_unpingable(Node)), ?HARNESS:set_advanced_conf(Node, NameValuePairs), - rt:start(Node). + rt_node:start(Node). %% @doc Rewrite the given node's app.config file, overriding the varialbes %% in the existing app.config with those in `Config'. update_app_config(all, Config) -> ?HARNESS:update_app_config(all, Config); update_app_config(Node, Config) -> - rt:stop(Node), + rt_node:stop(Node), ?assertEqual(ok, rt:wait_until_unpingable(Node)), ?HARNESS:update_app_config(Node, Config), - rt:start(Node). + rt_node:start(Node). version_to_config(Config) when is_tuple(Config)-> Config; version_to_config(Version) -> {Version, default}. diff --git a/src/rt_local.erl b/src/rt_local.erl index b8bfaed8e..fd4517e17 100644 --- a/src/rt_local.erl +++ b/src/rt_local.erl @@ -117,4 +117,4 @@ stream_cmd_loop(Port, Buffer, NewLineBuffer, Time={_MegaSecs, Secs, _MicroSecs}) {Status, Buffer} after rt_config:get(rt_max_wait_time) -> {-1, Buffer} - end. \ No newline at end of file + end. diff --git a/src/rt_node.erl b/src/rt_node.erl new file mode 100644 index 000000000..6a10664c1 --- /dev/null +++ b/src/rt_node.erl @@ -0,0 +1,173 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2013-2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. 
See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_node). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-export([start/1, + start_and_wait/1, + async_start/1, + stop/1, + stop_and_wait/1, + upgrade/2, + upgrade/3, + slow_upgrade/3, + join/2, + staged_join/2, + plan_and_commit/1, + do_commit/1, + leave/1, + down/2, + heal/1, + partition/2, + remove/2]). + +-define(HARNESS, (rt_config:get(rt_harness))). + +%% @doc Start the specified Riak node +start(Node) -> + ?HARNESS:start(Node). + +%% @doc Start the specified Riak `Node' and wait for it to be pingable +start_and_wait(Node) -> + start(Node), + ?assertEqual(ok, rt:wait_until_pingable(Node)). + +async_start(Node) -> + spawn(fun() -> start(Node) end). + +%% @doc Stop the specified Riak `Node'. +stop(Node) -> + lager:info("Stopping riak on ~p", [Node]), + timer:sleep(10000), %% I know, I know! + ?HARNESS:stop(Node). + %%rpc:call(Node, init, stop, []). + +%% @doc Stop the specified Riak `Node' and wait until it is not pingable +stop_and_wait(Node) -> + stop(Node), + ?assertEqual(ok, rt:wait_until_unpingable(Node)). + +%% @doc Upgrade a Riak `Node' to the specified `NewVersion'. +upgrade(Node, NewVersion) -> + ?HARNESS:upgrade(Node, NewVersion). + +%% @doc Upgrade a Riak `Node' to the specified `NewVersion' and update +%% the config based on entries in `Config'. +upgrade(Node, NewVersion, Config) -> + ?HARNESS:upgrade(Node, NewVersion, Config). 
+ +%% @doc Upgrade a Riak node to a specific version using the alternate +%% leave/upgrade/rejoin approach +slow_upgrade(Node, NewVersion, Nodes) -> + lager:info("Perform leave/upgrade/join upgrade on ~p", [Node]), + lager:info("Leaving ~p", [Node]), + leave(Node), + ?assertEqual(ok, rt:wait_until_unpingable(Node)), + upgrade(Node, NewVersion), + lager:info("Rejoin ~p", [Node]), + join(Node, hd(Nodes -- [Node])), + lager:info("Wait until all nodes are ready and there are no pending changes"), + ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)), + ok. + +%% @doc Have `Node' send a join request to `PNode' +join(Node, PNode) -> + R = rpc:call(Node, riak_core, join, [PNode]), + lager:info("[join] ~p to (~p): ~p", [Node, PNode, R]), + ?assertEqual(ok, R), + ok. + +%% @doc Have `Node' send a join request to `PNode' +staged_join(Node, PNode) -> + R = rpc:call(Node, riak_core, staged_join, [PNode]), + lager:info("[join] ~p to (~p): ~p", [Node, PNode, R]), + ?assertEqual(ok, R), + ok. + +plan_and_commit(Node) -> + timer:sleep(500), + lager:info("planning and commiting cluster join"), + case rpc:call(Node, riak_core_claimant, plan, []) of + {error, ring_not_ready} -> + lager:info("plan: ring not ready"), + timer:sleep(100), + rt:maybe_wait_for_changes(Node), + plan_and_commit(Node); + {ok, _, _} -> + do_commit(Node) + end. + +do_commit(Node) -> + case rpc:call(Node, riak_core_claimant, commit, []) of + {error, plan_changed} -> + lager:info("commit: plan changed"), + timer:sleep(100), + rt:maybe_wait_for_changes(Node), + plan_and_commit(Node); + {error, ring_not_ready} -> + lager:info("commit: ring not ready"), + timer:sleep(100), + rt:maybe_wait_for_changes(Node), + do_commit(Node); + {error,nothing_planned} -> + %% Assume plan actually committed somehow + ok; + ok -> + ok + end. 
+ +%% @doc Have the `Node' leave the cluster +leave(Node) -> + R = rpc:call(Node, riak_core, leave, []), + lager:info("[leave] ~p: ~p", [Node, R]), + ?assertEqual(ok, R), + ok. + +%% @doc Have `Node' remove `OtherNode' from the cluster +remove(Node, OtherNode) -> + ?assertEqual(ok, + rpc:call(Node, riak_kv_console, remove, [[atom_to_list(OtherNode)]])). + +%% @doc Have `Node' mark `OtherNode' as down +down(Node, OtherNode) -> + rpc:call(Node, riak_kv_console, down, [[atom_to_list(OtherNode)]]). + +%% @doc partition the `P1' from `P2' nodes +%% note: the nodes remained connected to riak_test@local, +%% which is how `heal/1' can still work. +partition(P1, P2) -> + OldCookie = rpc:call(hd(P1), erlang, get_cookie, []), + NewCookie = list_to_atom(lists:reverse(atom_to_list(OldCookie))), + [true = rpc:call(N, erlang, set_cookie, [N, NewCookie]) || N <- P1], + [[true = rpc:call(N, erlang, disconnect_node, [P2N]) || N <- P1] || P2N <- P2], + rt:wait_until_partitioned(P1, P2), + {NewCookie, OldCookie, P1, P2}. + +%% @doc heal the partition created by call to `partition/2' +%% `OldCookie' is the original shared cookie +heal({_NewCookie, OldCookie, P1, P2}) -> + Cluster = P1 ++ P2, + % set OldCookie on P1 Nodes + [true = rpc:call(N, erlang, set_cookie, [N, OldCookie]) || N <- P1], + rt:wait_until_connected(Cluster), + {_GN, []} = rpc:sbcast(Cluster, riak_core_node_watcher, broadcast), + ok. 
diff --git a/tests/basic_command_line.erl b/tests/basic_command_line.erl index 443cf73c8..398876c32 100644 --- a/tests/basic_command_line.erl +++ b/tests/basic_command_line.erl @@ -104,7 +104,7 @@ ping_up_test(Node) -> lager:info("Testing riak ping on ~s", [Node]), %% ping / pong - %% rt:start_and_wait(Node), + %% rt_node:start_and_wait(Node), lager:info("Node up, should ping"), {ok, PongOut} = rt:riak(Node, ["ping"]), ?assert(rt:str(PongOut, "pong")), diff --git a/tests/cluster_meta_basic.erl b/tests/cluster_meta_basic.erl index 188763ad6..cb4cf0935 100644 --- a/tests/cluster_meta_basic.erl +++ b/tests/cluster_meta_basic.erl @@ -56,13 +56,13 @@ test_writes_after_partial_cluster_failure([N1 | _]=Nodes) -> StopNodes = eager_peers(N1, N1), AliveNodes = Nodes -- StopNodes, lager:info("stopping nodes: ~p remaining nodes: ~p", [StopNodes, AliveNodes]), - [rt:stop(N) || N <- StopNodes], + [rt_node:stop(N) || N <- StopNodes], metadata_put(N1, ?PREFIX1, ?KEY1, ?VAL2), wait_until_metadata_value(AliveNodes, ?PREFIX1, ?KEY1, ?VAL2), lager:info("bring stopped nodes back up: ~p", [StopNodes]), - [rt:start(N) || N <- StopNodes], + [rt_node:start(N) || N <- StopNodes], wait_until_metadata_value(Nodes, ?PREFIX1, ?KEY1, ?VAL2), ok. diff --git a/tests/cluster_meta_rmr.erl b/tests/cluster_meta_rmr.erl index fe7f73e3a..fee5d840f 100644 --- a/tests/cluster_meta_rmr.erl +++ b/tests/cluster_meta_rmr.erl @@ -58,7 +58,7 @@ run(NumNodes, NumRounds, StableRounds) -> calc_stuff(AllNodes, NumNodes, NumRounds), exit(Pid, kill), %% start all the down nodes so we can clean them :( - [rt:start(Node) || Node <- DownNodes], + [rt_node:start(Node) || Node <- DownNodes], rt_cluster:clean_cluster(AllNodes). 
setup_nodes(NumNodes) -> @@ -108,7 +108,7 @@ run_rounds(Round, StableRound, SendFun, ConsistentFun, [SenderNode | _]=UpNodes, fail_node(Round, OtherNodes) -> Failed = lists:nth(random:uniform(length(OtherNodes)), OtherNodes), lager:info("round: ~p (unstable): shutting down ~p", [Round, Failed]), - rt:stop(Failed), + rt_node:stop(Failed), {Failed, lists:delete(Failed, OtherNodes)}. calc_stuff(AllNodes, NumNodes, NumRounds) -> diff --git a/tests/ensemble_basic2.erl b/tests/ensemble_basic2.erl index dee9e815f..6af2f2796 100644 --- a/tests/ensemble_basic2.erl +++ b/tests/ensemble_basic2.erl @@ -43,7 +43,7 @@ confirm() -> riak_kv_vnode_orig:init_orig(Args) end}}]}), rt:stop_and_wait(Node), - rt:start(Node), + rt_node:start(Node), lager:info("Polling peers while riak_kv starts. We should see none"), UpNoPeersFun = fun() -> diff --git a/tests/ensemble_basic3.erl b/tests/ensemble_basic3.erl index 92953cd63..529c561cc 100644 --- a/tests/ensemble_basic3.erl +++ b/tests/ensemble_basic3.erl @@ -58,7 +58,7 @@ confirm() -> PBC = rt_pb:pbc(Node), lager:info("Partitioning quorum minority: ~p", [Partitioned]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), + Part = rt_node:partition(Nodes -- Partitioned, Partitioned), ensemble_util:wait_until_stable(Node, Quorum), lager:info("Writing ~p consistent keys", [1000]), @@ -68,7 +68,7 @@ confirm() -> [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Healing partition"), - rt:heal(Part), + rt_node:heal(Part), lager:info("Suspending majority vnodes"), L = [begin diff --git a/tests/ensemble_basic4.erl b/tests/ensemble_basic4.erl index 35cd92f50..2050e0eeb 100644 --- a/tests/ensemble_basic4.erl +++ b/tests/ensemble_basic4.erl @@ -51,7 +51,7 @@ confirm() -> PBC = rt_pb:pbc(Node), lager:info("Partitioning quorum minority: ~p", [Partitioned]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), + Part = rt_node:partition(Nodes -- Partitioned, Partitioned), rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, 
[manual]), ensemble_util:wait_until_stable(Node, Quorum), @@ -62,6 +62,6 @@ confirm() -> [rt_pb:pbc_read(PBC, Bucket, Key) || Key <- Keys], lager:info("Healing partition"), - rt:heal(Part), + rt_node:heal(Part), pass. diff --git a/tests/ensemble_interleave.erl b/tests/ensemble_interleave.erl index 105bffffa..5aeb78ca4 100644 --- a/tests/ensemble_interleave.erl +++ b/tests/ensemble_interleave.erl @@ -69,7 +69,7 @@ confirm() -> Options = [{timeout, 500}], rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), + Part = rt_node:partition(Nodes -- Partitioned, Partitioned), ensemble_util:wait_until_stable(Node, Quorum), lager:info("Writing ~p consistent keys", [1000]), @@ -77,7 +77,7 @@ confirm() -> lager:info("Read keys to verify they exist"), [rt_pb:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], - rt:heal(Part), + rt_node:heal(Part), [begin lager:info("Suspending vnode: ~p", [VIdx]), diff --git a/tests/ensemble_remove_node.erl b/tests/ensemble_remove_node.erl index 2d9e8e9e0..cefc0ba89 100644 --- a/tests/ensemble_remove_node.erl +++ b/tests/ensemble_remove_node.erl @@ -54,9 +54,9 @@ confirm() -> end}}]}), lager:info("Removing Nodes 2 and 3 from the cluster"), - rt:leave(Node2), + rt_node:leave(Node2), ok = ensemble_util:wait_until_stable(Node, NVal), - rt:leave(Node3), + rt_node:leave(Node3), ok = ensemble_util:wait_until_stable(Node, NVal), Remaining = Nodes -- [Node2, Node3], rt:wait_until_nodes_agree_about_ownership(Remaining), diff --git a/tests/ensemble_remove_node2.erl b/tests/ensemble_remove_node2.erl index 9d3a6945a..4477de2c6 100644 --- a/tests/ensemble_remove_node2.erl +++ b/tests/ensemble_remove_node2.erl @@ -61,9 +61,9 @@ confirm() -> end}}]}), lager:info("Removing Nodes 2 and 3 from the cluster"), - rt:leave(Node2), + rt_node:leave(Node2), ok = ensemble_util:wait_until_stable(Node, NVal), - rt:leave(Node3), + rt_node:leave(Node3), ok = ensemble_util:wait_until_stable(Node, NVal), 
Remaining = Nodes -- [Node2, Node3], rt:wait_until_nodes_agree_about_ownership(Remaining), diff --git a/tests/ensemble_sync.erl b/tests/ensemble_sync.erl index 6fa89feac..5128a0ae5 100644 --- a/tests/ensemble_sync.erl +++ b/tests/ensemble_sync.erl @@ -75,7 +75,7 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> Options = [{timeout, 2000}], rpc:multicall(Nodes, riak_kv_entropy_manager, set_mode, [manual]), - Part = rt:partition(Nodes -- Partitioned, Partitioned), + Part = rt_node:partition(Nodes -- Partitioned, Partitioned), ensemble_util:wait_until_stable(Node, Quorum), %% Write data while minority is partitioned @@ -84,7 +84,7 @@ run_scenario(Nodes, NVal, {NumKill, NumSuspend, NumValid, _, Name, Expect}) -> lager:info("Read keys to verify they exist"), [rt_pb:pbc_read(PBC, Bucket, Key, Options) || Key <- Keys], - rt:heal(Part), + rt_node:heal(Part), %% Suspend desired number of valid vnodes S1 = [vnode_util:suspend_vnode(VNode, VIdx) || {VIdx, VNode} <- SuspendVN], diff --git a/tests/gh_riak_core_154.erl b/tests/gh_riak_core_154.erl index 70882e5dc..dfb53bfb4 100644 --- a/tests/gh_riak_core_154.erl +++ b/tests/gh_riak_core_154.erl @@ -33,24 +33,24 @@ confirm() -> [Node1, Node2] = Nodes, lager:info("Write data while ~p is offline", [Node2]), - rt:stop(Node2), + rt_node:stop(Node2), rt:wait_until_unpingable(Node2), ?assertEqual([], rt:systest_write(Node1, 1000, 3)), lager:info("Verify that ~p is missing data", [Node2]), - rt:start(Node2), - rt:stop(Node1), + rt_node:start(Node2), + rt_node:stop(Node1), rt:wait_until_unpingable(Node1), ?assertMatch([{_,{error,notfound}}|_], rt:systest_read(Node2, 1000, 3)), lager:info("Restart ~p and wait for handoff to occur", [Node1]), - rt:start(Node1), + rt_node:start(Node1), rt:wait_for_service(Node1, riak_kv), rt:wait_until_transfers_complete([Node1]), lager:info("Verify that ~p has all data", [Node2]), - rt:stop(Node1), + rt_node:stop(Node1), ?assertEqual([], rt:systest_read(Node2, 1000, 3)), 
lager:info("gh_riak_core_154: passed"), diff --git a/tests/gh_riak_core_155.erl b/tests/gh_riak_core_155.erl index fb53b69ce..1e6ffd604 100644 --- a/tests/gh_riak_core_155.erl +++ b/tests/gh_riak_core_155.erl @@ -39,7 +39,7 @@ confirm() -> %% Restart node, add intercept that delay proxy startup, and issue gets. %% Gets will come in before proxies started, and should trigger crash. rt:stop_and_wait(Node), - rt:async_start(Node), + rt_node:async_start(Node), rt:wait_until_pingable(Node), rt_intercept:load_intercepts([Node]), rt_intercept:add(Node, {riak_core_vnode_proxy_sup, diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index de893edcb..06c05b849 100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -134,7 +134,7 @@ test_supervision() -> lager:info("It can fail, it can fail 10 times"), rt:wait_until(retry_check_fun(Node)), - rt:stop(Node), + rt_node:stop(Node), ok_ok. retry_check_fun(Node) -> @@ -193,7 +193,7 @@ test_application_stop() -> ?assertEqual(nomatch, re:run(rpc:call(Node, os, cmd, ["ps -Af"]), "riak_jmx.jar", [])), - rt:stop(Node). + rt_node:stop(Node). 
verify_inc(Prev, Props, Keys) -> [begin diff --git a/tests/pr_pw.erl b/tests/pr_pw.erl index 07b31ec30..5cecf6e6d 100644 --- a/tests/pr_pw.erl +++ b/tests/pr_pw.erl @@ -98,7 +98,7 @@ confirm() -> ?assertEqual({error, timeout}, C:put(Obj, [{pw, quorum}])), %% restart the node - rt:start_and_wait(Node), + rt_node:start_and_wait(Node), rt:wait_for_service(Node, riak_kv), %% we can make quorum again @@ -111,7 +111,7 @@ confirm() -> %% reboot the node rt:stop_and_wait(Node2), - rt:start_and_wait(Node2), + rt_node:start_and_wait(Node2), rt:wait_for_service(Node2, riak_kv), %% everything is happy again diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index 31c360984..5723f2235 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -572,7 +572,7 @@ validate_intercepted_fullsync(InterceptTarget, %% Reboot node. rt:stop_and_wait(InterceptTarget), - rt:start_and_wait(InterceptTarget), + rt_node:start_and_wait(InterceptTarget), %% Wait for riak_kv and riak_repl to initialize. rt:wait_for_service(InterceptTarget, riak_kv), diff --git a/tests/replication/repl_util.erl b/tests/replication/repl_util.erl index 35b6a5881..d5175d884 100644 --- a/tests/replication/repl_util.erl +++ b/tests/replication/repl_util.erl @@ -613,7 +613,7 @@ validate_intercepted_fullsync(InterceptTarget, %% Reboot node. rt:stop_and_wait(InterceptTarget), - rt:start_and_wait(InterceptTarget), + rt_node:start_and_wait(InterceptTarget), %% Wait for riak_kv and riak_repl to initialize. 
rt:wait_for_service(InterceptTarget, riak_kv), diff --git a/tests/replication/replication.erl b/tests/replication/replication.erl index c31ae1507..e09502230 100644 --- a/tests/replication/replication.erl +++ b/tests/replication/replication.erl @@ -148,7 +148,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> rt:log_to_nodes(AllNodes, "Testing master failover: stopping ~p", [LeaderA]), lager:info("Testing master failover: stopping ~p", [LeaderA]), - rt:stop(LeaderA), + rt_node:stop(LeaderA), rt:wait_until_unpingable(LeaderA), wait_until_leader(ASecond), @@ -171,7 +171,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> LeaderB = rpc:call(BFirst, riak_repl_leader, leader_node, []), lager:info("Testing client failover: stopping ~p", [LeaderB]), - rt:stop(LeaderB), + rt_node:stop(LeaderB), rt:wait_until_unpingable(LeaderB), BSecond = hd(BNodes -- [LeaderB]), wait_until_leader(BSecond), @@ -202,7 +202,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> %% lager:info("Restarting down node ~p", [LeaderA]), - rt:start(LeaderA), + rt_node:start(LeaderA), rt:wait_until_pingable(LeaderA), wait_until_no_pending_changes(ANodes), wait_until_leader_converge(ANodes), @@ -270,7 +270,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> end, lager:info("Restarting down node ~p", [LeaderB]), - rt:start(LeaderB), + rt_node:start(LeaderB), rt:wait_until_pingable(LeaderB), case nodes_all_have_version(ANodes, "1.1.0") of diff --git a/tests/replication/replication2.erl b/tests/replication/replication2.erl index 87cee9e05..4e21ba944 100644 --- a/tests/replication/replication2.erl +++ b/tests/replication/replication2.erl @@ -191,7 +191,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> log_to_nodes(AllNodes, "Testing master failover: stopping ~p", [LeaderA]), lager:info("Testing master failover: stopping ~p", [LeaderA]), - rt:stop(LeaderA), + rt_node:stop(LeaderA), 
rt:wait_until_unpingable(LeaderA), ASecond = hd(ANodes -- [LeaderA]), repl_util:wait_until_leader(ASecond), @@ -217,7 +217,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> log_to_nodes(AllNodes, "Testing client failover: stopping ~p", [LeaderB]), lager:info("Testing client failover: stopping ~p", [LeaderB]), - rt:stop(LeaderB), + rt_node:stop(LeaderB), rt:wait_until_unpingable(LeaderB), BSecond = hd(BNodes -- [LeaderB]), repl_util:wait_until_leader(BSecond), @@ -250,7 +250,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> log_to_nodes(AllNodes, "Test fullsync after restarting ~p", [LeaderA]), lager:info("Restarting down node ~p", [LeaderA]), - rt:start(LeaderA), + rt_node:start(LeaderA), rt:wait_until_pingable(LeaderA), repl_util:start_and_wait_until_fullsync_complete(LeaderA2), @@ -305,7 +305,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> lager:info("Finished Joe's Section"), lager:info("Restarting down node ~p", [LeaderB]), - rt:start(LeaderB), + rt_node:start(LeaderB), rt:wait_until_pingable(LeaderB), lager:info("Nodes restarted"), @@ -441,7 +441,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> lager:info("Stopping node ~p", [Target]), - rt:stop(Target), + rt_node:stop(Target), rt:wait_until_unpingable(Target), lager:info("Starting realtime"), @@ -453,7 +453,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> lager:info("Restarting node ~p", [Target]), - rt:start(Target), + rt_node:start(Target), rt:wait_until_pingable(Target), rt:wait_for_service(Target, riak_repl), timer:sleep(5000), @@ -475,7 +475,7 @@ pb_write_during_shutdown(Target, BSecond, TestBucket) -> spawn(fun() -> timer:sleep(500), lager:info("Stopping node ~p again", [Target]), - rt:stop(Target), + rt_node:stop(Target), lager:info("Node stopped") end), @@ -498,7 +498,7 @@ pb_write_during_shutdown(Target, BSecond, TestBucket) -> lager:info("pb_write_during_shutdown: Ensure node ~p is 
down before restart", [Target]), ?assertEqual(ok, rt:wait_until_unpingable(Target)), - rt:start(Target), + rt_node:start(Target), rt:wait_until_pingable(Target), rt:wait_for_service(Target, riak_repl), ReadErrors2 = rt:systest_read(Target, 1000, 11000, TestBucket, 2), @@ -533,7 +533,7 @@ http_write_during_shutdown(Target, BSecond, TestBucket) -> spawn(fun() -> timer:sleep(500), lager:info("Stopping node ~p again", [Target]), - rt:stop(Target), + rt_node:stop(Target), lager:info("Node stopped") end), @@ -558,7 +558,7 @@ http_write_during_shutdown(Target, BSecond, TestBucket) -> lager:info("http: write_during_shutdown: Ensure node ~p is down before restart", [Target]), ?assertEqual(ok, rt:wait_until_unpingable(Target)), - rt:start(Target), + rt_node:start(Target), rt:wait_until_pingable(Target), rt:wait_for_service(Target, riak_repl), ReadErrors2 = http_read(C, 12000, 22000, TestBucket, 2), diff --git a/tests/replication/replication2_pg.erl b/tests/replication/replication2_pg.erl index 01d96474f..bb328b4cd 100644 --- a/tests/replication/replication2_pg.erl +++ b/tests/replication/replication2_pg.erl @@ -418,7 +418,7 @@ test_pg_proxy(SSL) -> lager:info("Stopping leader on requester cluster"), PGLeaderB = rpc:call(FirstB, riak_core_cluster_mgr, get_leader, []), rt:log_to_nodes(AllNodes, "Killing leader on requester cluster"), - rt:stop(PGLeaderB), + rt_node:stop(PGLeaderB), [RunningBNode | _ ] = BNodes -- [PGLeaderB], repl_util:wait_until_leader(RunningBNode), PidB2 = rt_pb:pbc(RunningBNode), @@ -428,7 +428,7 @@ test_pg_proxy(SSL) -> lager:info("Stopping leader on provider cluster"), PGLeaderA = rpc:call(FirstA, riak_core_cluster_mgr, get_leader, []), - rt:stop(PGLeaderA), + rt_node:stop(PGLeaderA), [RunningANode | _ ] = ANodes -- [PGLeaderA], repl_util:wait_until_leader(RunningANode), ?assertEqual(ok, wait_until_pg(RunningBNode, PidB2, Bucket, KeyD, CidA)), @@ -530,7 +530,7 @@ test_cluster_mapping(SSL) -> % shut down cluster A lager:info("Shutting down cluster A"), - 
[ rt:stop(Node) || Node <- ANodes ], + [ rt_node:stop(Node) || Node <- ANodes ], [ rt:wait_until_unpingable(Node) || Node <- ANodes ], rt:wait_until_ring_converged(BNodes), @@ -937,12 +937,12 @@ verify_topology_change(SourceNodes, SinkNodes) -> %% Sad this takes 2.5 minutes lager:info("Removing current leader from the cluster: ~p.", [SinkLeader]), - rt:leave(SinkLeader), + rt_node:leave(SinkLeader), ?assertEqual(ok, rt:wait_until_unpingable(SinkLeader)), %% Wait for everything to restart, and rings to converge. lager:info("Starting leader node back up and waiting for repl."), - rt:start(SinkLeader), + rt_node:start(SinkLeader), rt:wait_for_service(SinkLeader, riak_repl), rt:wait_until_ring_converged(SinkNodes), diff --git a/tests/riak_control_authentication.erl b/tests/riak_control_authentication.erl index a1fce9f70..3e8db3b00 100644 --- a/tests/riak_control_authentication.erl +++ b/tests/riak_control_authentication.erl @@ -218,7 +218,7 @@ build_singleton_cluster(Vsn, Config) -> %% take effect. Node = lists:nth(1, Nodes), rt:stop_and_wait(Node), - rt:start_and_wait(Node), + rt_node:start_and_wait(Node), rt:wait_for_service(Node, riak_kv), %% Wait for control to start. diff --git a/tests/rt_basic_test.erl b/tests/rt_basic_test.erl index 67aeae122..ed91736e0 100644 --- a/tests/rt_basic_test.erl +++ b/tests/rt_basic_test.erl @@ -25,5 +25,5 @@ confirm() -> lager:info("Deploy some nodes"), Nodes = rt_cluster:deploy_nodes(2), lager:info("Stop the nodes"), - [rt:stop(Node) || Node <- Nodes], + [rt_node:stop(Node) || Node <- Nodes], pass. 
diff --git a/tests/verify_build_cluster.erl b/tests/verify_build_cluster.erl index a5f2e5abd..051d5c616 100644 --- a/tests/verify_build_cluster.erl +++ b/tests/verify_build_cluster.erl @@ -53,37 +53,37 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> wait_and_validate(Nodes), lager:info("taking Node 1 down"), - rt:stop(Node1), + rt_node:stop(Node1), ?assertEqual(ok, rt:wait_until_unpingable(Node1)), wait_and_validate(Nodes, [Node2, Node3, Node4]), lager:info("taking Node 2 down"), - rt:stop(Node2), + rt_node:stop(Node2), ?assertEqual(ok, rt:wait_until_unpingable(Node2)), wait_and_validate(Nodes, [Node3, Node4]), lager:info("bringing Node 1 up"), - rt:start(Node1), + rt_node:start(Node1), ok = rt:wait_until_pingable(Node1), wait_and_validate(Nodes, [Node1, Node3, Node4]), lager:info("bringing Node 2 up"), - rt:start(Node2), + rt_node:start(Node2), ok = rt:wait_until_pingable(Node2), wait_and_validate(Nodes), % leave 1, 2, and 3 lager:info("leaving Node 1"), - rt:leave(Node1), + rt_node:leave(Node1), ?assertEqual(ok, rt:wait_until_unpingable(Node1)), wait_and_validate([Node2, Node3, Node4]), lager:info("leaving Node 2"), - rt:leave(Node2), + rt_node:leave(Node2), ?assertEqual(ok, rt:wait_until_unpingable(Node2)), wait_and_validate([Node3, Node4]), lager:info("leaving Node 3"), - rt:leave(Node3), + rt_node:leave(Node3), ?assertEqual(ok, rt:wait_until_unpingable(Node3)), % verify 4 diff --git a/tests/verify_capabilities.erl b/tests/verify_capabilities.erl index d689a0229..2d6ee385c 100644 --- a/tests/verify_capabilities.erl +++ b/tests/verify_capabilities.erl @@ -230,7 +230,7 @@ confirm() -> lager:info("Verify vnode_routing == legacy"), assert_capability(CNode, {riak_core, vnode_routing}, legacy), - [rt:stop(Node) || Node <- Nodes], + [rt_node:stop(Node) || Node <- Nodes], pass. 
assert_capability(CNode, Capability, Value) -> diff --git a/tests/verify_counter_converge.erl b/tests/verify_counter_converge.erl index bad3f90b3..f91269e1c 100644 --- a/tests/verify_counter_converge.erl +++ b/tests/verify_counter_converge.erl @@ -50,7 +50,7 @@ confirm() -> lager:info("Partition cluster in two."), - PartInfo = rt:partition([N1, N2], [N3, N4]), + PartInfo = rt_node:partition([N1, N2], [N3, N4]), %% increment one side increment_counter(C1, Key, 5), @@ -68,7 +68,7 @@ confirm() -> %% heal lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), + ok = rt_node:heal(PartInfo), ok = rt:wait_for_cluster_service(Nodes, riak_kv), %% verify all nodes agree diff --git a/tests/verify_down.erl b/tests/verify_down.erl index efbf79d0d..488b65cb0 100644 --- a/tests/verify_down.erl +++ b/tests/verify_down.erl @@ -34,7 +34,7 @@ confirm() -> %% Shutdown node2 lager:info("Stopping ~p", [Node2]), - rt:stop(Node2), + rt_node:stop(Node2), ?assertEqual(ok, rt:wait_until_unpingable(Node2)), Remaining = Nodes -- [Node2], @@ -49,7 +49,7 @@ confirm() -> %% Mark node2 as down and wait for ring convergence lager:info("Mark ~p as down", [Node2]), - rt:down(Node1, Node2), + rt_node:down(Node1, Node2), ?assertEqual(ok, rt:wait_until_ring_converged(Remaining)), [?assertEqual(down, rt:status_of_according_to(Node2, Node)) || Node <- Remaining], @@ -58,7 +58,7 @@ confirm() -> %% Restart node2 and wait for ring convergence lager:info("Restart ~p and wait for ring convergence", [Node2]), - rt:start(Node2), + rt_node:start(Node2), ?assertEqual(ok, rt:wait_until_nodes_ready([Node2])), ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), diff --git a/tests/verify_dt_context.erl b/tests/verify_dt_context.erl index af81a6c67..4a355962f 100644 --- a/tests/verify_dt_context.erl +++ b/tests/verify_dt_context.erl @@ -72,7 +72,7 @@ confirm() -> lager:info("Partition cluster in two."), - PartInfo = rt:partition([N1], [N2]), + PartInfo = rt_node:partition([N1], [N2]), 
lager:info("Modify data on side 1"), %% Modify one side @@ -135,7 +135,7 @@ confirm() -> %% Check both sides %% heal lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), + ok = rt_node:heal(PartInfo), ok = rt:wait_for_cluster_service(Nodes, riak_kv), %% verify all nodes agree diff --git a/tests/verify_dt_converge.erl b/tests/verify_dt_converge.erl index a8d3dcf4e..6656e9edd 100644 --- a/tests/verify_dt_converge.erl +++ b/tests/verify_dt_converge.erl @@ -72,7 +72,7 @@ confirm() -> lager:info("Partition cluster in two."), - PartInfo = rt:partition([N1, N2], [N3, N4]), + PartInfo = rt_node:partition([N1, N2], [N3, N4]), lager:info("Modify data on side 1"), %% Modify one side @@ -108,7 +108,7 @@ confirm() -> %% heal lager:info("Heal and check merged values"), - ok = rt:heal(PartInfo), + ok = rt_node:heal(PartInfo), ok = rt:wait_for_cluster_service(Nodes, riak_kv), %% verify all nodes agree diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index b7cbe73f7..2a51f2063 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ -57,17 +57,17 @@ confirm() -> check_it_all(Nodes, http), lager:info("Stopping Node1"), - rt:stop(Node1), + rt_node:stop(Node1), rt:wait_until_unpingable(Node1), %% Stop current node, restart previous node, verify lists:foldl(fun(Node, Prev) -> lager:info("Stopping Node ~p", [Node]), - rt:stop(Node), + rt_node:stop(Node), rt:wait_until_unpingable(Node), lager:info("Starting Node ~p", [Prev]), - rt:start(Prev), + rt_node:start(Prev), UpNodes = Nodes -- [Node], lager:info("Waiting for riak_kv service to be ready in ~p", [Prev]), rt:wait_for_cluster_service(UpNodes, riak_kv), @@ -78,11 +78,11 @@ confirm() -> end, Node1, [Node2, Node3, Node4]), lager:info("Stopping Node2"), - rt:stop(Node2), + rt_node:stop(Node2), rt:wait_until_unpingable(Node2), lager:info("Stopping Node3"), - rt:stop(Node3), + rt_node:stop(Node3), rt:wait_until_unpingable(Node3), lager:info("Only Node1 is up, so test should fail!"), diff 
--git a/tests/verify_membackend.erl b/tests/verify_membackend.erl index 2f05cec2c..9b81eed99 100644 --- a/tests/verify_membackend.erl +++ b/tests/verify_membackend.erl @@ -89,7 +89,7 @@ check_leave_and_expiry(NodeA, NodeB) -> ?assertEqual(ok, rt:wait_until_nodes_ready([NodeA, NodeB])), rt:wait_until_no_pending_changes([NodeA, NodeB]), - rt:leave(NodeB), + rt_node:leave(NodeB), rt:wait_until_unpingable(NodeB), ?assertEqual([], rt:systest_read(NodeA, 1, 100, ?BUCKET, 2)), diff --git a/tests/verify_mr_prereduce_node_down.erl b/tests/verify_mr_prereduce_node_down.erl index 62dd2f496..7df0ae261 100644 --- a/tests/verify_mr_prereduce_node_down.erl +++ b/tests/verify_mr_prereduce_node_down.erl @@ -47,7 +47,7 @@ confirm() -> [Primary,ToKill|_] = rt_cluster:build_cluster(NodeCount), %% We need one node down for this test - rt:stop(ToKill), + rt_node:stop(ToKill), %% store our test data Bucket = <<"verify_mr_prereduce_node_down">>, diff --git a/tests/verify_riak_lager.erl b/tests/verify_riak_lager.erl index b902048d6..a11b6a03e 100644 --- a/tests/verify_riak_lager.erl +++ b/tests/verify_riak_lager.erl @@ -33,9 +33,9 @@ confirm() -> ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), lager:info("Stopping that node"), - rt:stop(Node), + rt_node:stop(Node), - rt:start(Node), + rt_node:start(Node), lager:info("Checking for log files"), {ok, LagerHandlers} = rt:rpc_get_env(Node, [{lager, handlers}]), diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index 9235941c3..ced7f813e 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -74,7 +74,7 @@ confirm() -> lager:info("restarting node"), rt:stop_and_wait(Node), - rt:start(Node), + rt_node:start(Node), rt:wait_for_service(Node, riak_kv), check_fixed_index_statuses(Node, true), @@ -85,7 +85,7 @@ confirm() -> check_fixed_index_statuses(Node, false), rt:stop_and_wait(Node), - rt:start(Node), + rt_node:start(Node), 
rt:wait_for_service(Node, riak_kv), check_fixed_index_statuses(Node, false), diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl index 6032f0bdf..9e61ce599 100644 --- a/tests/verify_staged_clustering.erl +++ b/tests/verify_staged_clustering.erl @@ -70,7 +70,7 @@ confirm() -> ?assertEqual(ok, rt:wait_until_unpingable(Node2)), lager:info("Restart ~p and re-join to cluster", [Node2]), - rt:start(Node2), + rt_node:start(Node2), stage_join(Node2, Node1), ?assertEqual(ok, rt:wait_until_all_members(Nodes)), From 06d58177b617fcf16081c84f7ecf18dea3a132f0 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 15:52:31 -0400 Subject: [PATCH 09/17] Move rt:brutal_kill to rt_node. --- src/rt.erl | 13 ------------- src/rt_node.erl | 15 ++++++++++++++- tests/verify_handoff.erl | 4 ++-- tests/verify_vclock.erl | 2 +- 4 files changed, 17 insertions(+), 17 deletions(-) diff --git a/src/rt.erl b/src/rt.erl index e0bfe2717..f01cee00b 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -33,7 +33,6 @@ assert_nodes_agree_about_ownership/1, attach/2, attach_direct/2, - brutal_kill/1, capability/2, capability/3, check_singleton_node/1, @@ -533,18 +532,6 @@ is_partitioned(Node, Peers) -> AvailableNodes = rpc:call(Node, riak_core_node_watcher, nodes, [riak_kv]), lists:all(fun(Peer) -> not lists:member(Peer, AvailableNodes) end, Peers). -% when you just can't wait -brutal_kill(Node) -> - rt_cover:maybe_stop_on_node(Node), - lager:info("Killing node ~p", [Node]), - OSPidToKill = rpc:call(Node, os, getpid, []), - %% try a normal kill first, but set a timer to - %% kill -9 after 5 seconds just in case - rpc:cast(Node, timer, apply_after, - [5000, os, cmd, [io_lib:format("kill -9 ~s", [OSPidToKill])]]), - rpc:cast(Node, os, cmd, [io_lib:format("kill -15 ~s", [OSPidToKill])]), - ok. 
- capability(Node, all) -> rpc:call(Node, riak_core_capability, all, []); capability(Node, Capability) -> diff --git a/src/rt_node.erl b/src/rt_node.erl index 6a10664c1..ee20b034e 100644 --- a/src/rt_node.erl +++ b/src/rt_node.erl @@ -37,7 +37,8 @@ down/2, heal/1, partition/2, - remove/2]). + remove/2, + brutal_kill/1]). -define(HARNESS, (rt_config:get(rt_harness))). @@ -171,3 +172,15 @@ heal({_NewCookie, OldCookie, P1, P2}) -> rt:wait_until_connected(Cluster), {_GN, []} = rpc:sbcast(Cluster, riak_core_node_watcher, broadcast), ok. + +% when you just can't wait +brutal_kill(Node) -> + rt_cover:maybe_stop_on_node(Node), + lager:info("Killing node ~p", [Node]), + OSPidToKill = rpc:call(Node, os, getpid, []), + %% try a normal kill first, but set a timer to + %% kill -9 after 5 seconds just in case + rpc:cast(Node, timer, apply_after, + [5000, os, cmd, [io_lib:format("kill -9 ~s", [OSPidToKill])]]), + rpc:cast(Node, os, cmd, [io_lib:format("kill -15 ~s", [OSPidToKill])]), + ok. diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index 3a457412c..16fca5c81 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl @@ -94,11 +94,11 @@ run_test(TestMode, NTestItems, NTestNodes, HandoffEncoding) -> %% Prepare for the next call to our test (we aren't polite about it, it's faster that way): lager:info("Bringing down test nodes."), - lists:foreach(fun(N) -> rt:brutal_kill(N) end, TestNodes), + lists:foreach(fun(N) -> rt_node:brutal_kill(N) end, TestNodes), %% The "root" node can't leave() since it's the only node left: lager:info("Stopping root node."), - rt:brutal_kill(RootNode). + rt_node:brutal_kill(RootNode). 
%% See if we get the same data back from our new nodes as we put into the root node: test_handoff(RootNode, NewNode, NTestItems) -> diff --git a/tests/verify_vclock.erl b/tests/verify_vclock.erl index cf7c89268..44ec38d9a 100644 --- a/tests/verify_vclock.erl +++ b/tests/verify_vclock.erl @@ -118,7 +118,7 @@ force_encoding(Node, EncodingMethod) -> end. stopall(Nodes) -> - lists:foreach(fun(N) -> rt:brutal_kill(N) end, Nodes). + lists:foreach(fun(N) -> rt_node:brutal_kill(N) end, Nodes). make_kv(N, VSuffix) -> K = <>, From 6d46dc1fd76080c844533ba6123c33b9b0de28fe Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 18:16:06 -0400 Subject: [PATCH 10/17] Move ring-related rt functions to rt_ring; some cleanup. --- src/rt.erl | 98 +--------------- src/rt_cluster.erl | 8 +- src/rt_cs_dev.erl | 2 +- src/rt_ring.erl | 114 +++++++++++++++++++ src/rtdev.erl | 2 +- src/rtperf.erl | 2 +- src/rtssh.erl | 2 +- tests/overload.erl | 2 +- tests/partition_repair.erl | 2 +- tests/replication/repl_aae_fullsync.erl | 2 +- tests/replication/repl_location_failures.erl | 2 +- tests/verify_bitcask_tombstone2_upgrade.erl | 2 +- tests/verify_staged_clustering.erl | 6 +- 13 files changed, 133 insertions(+), 111 deletions(-) create mode 100644 src/rt_ring.erl diff --git a/src/rt.erl b/src/rt.erl index f01cee00b..c11aef49f 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -30,14 +30,11 @@ -compile(export_all). 
-export([ admin/2, - assert_nodes_agree_about_ownership/1, attach/2, attach_direct/2, capability/2, capability/3, - check_singleton_node/1, check_ibrowse/0, - claimant_according_to/1, cmd/1, cmd/2, connection_info/1, @@ -49,17 +46,12 @@ get_ip/1, get_node_logs/0, get_replica/5, - get_ring/1, get_version/0, is_mixed_cluster/1, is_pingable/1, load_modules_on_nodes/2, log_to_nodes/2, log_to_nodes/3, - members_according_to/1, - nearest_ringsize/1, - owners_according_to/1, - partitions_for_node/1, pmap/2, post_result/2, priv_dir/0, @@ -72,7 +64,6 @@ spawn_cmd/1, spawn_cmd/2, search_cmd/2, - status_of_according_to/2, str/2, systest_read/2, systest_read/3, @@ -181,7 +172,7 @@ connection_info(Nodes) when is_list(Nodes) -> [ {Node, connection_info(Node)} || Node <- Nodes]. maybe_wait_for_changes(Node) -> - Ring = get_ring(Node), + Ring = rt_ring:get_ring(Node), Changes = riak_core_ring:pending_changes(Ring), Joining = riak_core_ring:members(Ring, [joining]), if Changes =:= [] -> @@ -432,7 +423,7 @@ wait_until_all_members(Nodes, ExpectedMembers) -> lager:info("Wait until all members ~p ~p", [Nodes, ExpectedMembers]), S1 = ordsets:from_list(ExpectedMembers), F = fun(Node) -> - case members_according_to(Node) of + case rt_ring:members_according_to(Node) of {badrpc, _} -> false; ReportedMembers -> @@ -562,7 +553,7 @@ cap_equal(Val, Cap) -> wait_until_owners_according_to(Node, Nodes) -> SortedNodes = lists:usort(Nodes), F = fun(N) -> - owners_according_to(N) =:= SortedNodes + rt_ring:owners_according_to(N) =:= SortedNodes end, ?assertEqual(ok, wait_until(Node, F)), ok. @@ -654,89 +645,6 @@ index_built_fun(Node) -> %%% Ring Functions %%%=================================================================== -%% @doc Ensure that the specified node is a singleton node/cluster -- a node -%% that owns 100% of the ring. 
-check_singleton_node(Node) -> - lager:info("Check ~p is a singleton", [Node]), - {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), - Owners = lists:usort([Owner || {_Idx, Owner} <- riak_core_ring:all_owners(Ring)]), - ?assertEqual([Node], Owners), - ok. - -% @doc Get list of partitions owned by node (primary). -partitions_for_node(Node) -> - Ring = get_ring(Node), - [Idx || {Idx, Owner} <- riak_core_ring:all_owners(Ring), Owner == Node]. - -%% @doc Get the raw ring for `Node'. -get_ring(Node) -> - {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), - Ring. - -assert_nodes_agree_about_ownership(Nodes) -> - ?assertEqual(ok, wait_until_ring_converged(Nodes)), - ?assertEqual(ok, wait_until_all_members(Nodes)), - [ ?assertEqual({Node, Nodes}, {Node, owners_according_to(Node)}) || Node <- Nodes]. - -%% @doc Return a list of nodes that own partitions according to the ring -%% retrieved from the specified node. -owners_according_to(Node) -> - case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of - {ok, Ring} -> - Owners = [Owner || {_Idx, Owner} <- riak_core_ring:all_owners(Ring)], - lists:usort(Owners); - {badrpc, _}=BadRpc -> - BadRpc - end. - -%% @doc Return a list of cluster members according to the ring retrieved from -%% the specified node. -members_according_to(Node) -> - case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of - {ok, Ring} -> - Members = riak_core_ring:all_members(Ring), - Members; - {badrpc, _}=BadRpc -> - BadRpc - end. - -%% @doc Return an appropriate ringsize for the node count passed -%% in. 24 is the number of cores on the bigger intel machines, but this -%% may be too large for the single-chip machines. -nearest_ringsize(Count) -> - nearest_ringsize(Count * 24, 2). - -nearest_ringsize(Count, Power) -> - case Count < trunc(Power * 0.9) of - true -> - Power; - false -> - nearest_ringsize(Count, Power * 2) - end. 
- -%% @doc Return the cluster status of `Member' according to the ring -%% retrieved from `Node'. -status_of_according_to(Member, Node) -> - case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of - {ok, Ring} -> - Status = riak_core_ring:member_status(Ring, Member), - Status; - {badrpc, _}=BadRpc -> - BadRpc - end. - -%% @doc Return a list of nodes that own partitions according to the ring -%% retrieved from the specified node. -claimant_according_to(Node) -> - case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of - {ok, Ring} -> - Claimant = riak_core_ring:claimant(Ring), - Claimant; - {badrpc, _}=BadRpc -> - BadRpc - end. - - %%%=================================================================== %%% Basic Read/Write Functions %%%=================================================================== diff --git a/src/rt_cluster.erl b/src/rt_cluster.erl index cab1cd92c..a3ccf4ccb 100644 --- a/src/rt_cluster.erl +++ b/src/rt_cluster.erl @@ -151,7 +151,7 @@ build_cluster(NumNodes, Versions, InitialConfig) -> join_cluster(Nodes) -> %% Ensure each node owns 100% of it's own ring - [?assertEqual([Node], rt:owners_according_to(Node)) || Node <- Nodes], + [?assertEqual([Node], rt_ring:owners_according_to(Node)) || Node <- Nodes], %% Join nodes [Node1|OtherNodes] = Nodes, @@ -162,8 +162,8 @@ join_cluster(Nodes) -> _ -> %% ok do a staged join and then commit it, this eliminates the %% large amount of redundant handoff done in a sequential join - [rt:staged_join(Node, Node1) || Node <- OtherNodes], - rt:plan_and_commit(Node1), + [rt_node:staged_join(Node, Node1) || Node <- OtherNodes], + rt_node:plan_and_commit(Node1), try_nodes_ready(Nodes, 3, 500) end, @@ -176,7 +176,7 @@ join_cluster(Nodes) -> try_nodes_ready([Node1 | _Nodes], 0, _SleepMs) -> lager:info("Nodes not ready after initial plan/commit, retrying"), - rt:plan_and_commit(Node1); + rt_node:plan_and_commit(Node1); try_nodes_ready(Nodes, N, SleepMs) -> ReadyNodes = [Node || Node <- Nodes, 
rt:is_ready(Node) =:= true], case ReadyNodes of diff --git a/src/rt_cs_dev.erl b/src/rt_cs_dev.erl index c0b555c58..afd3f955f 100644 --- a/src/rt_cs_dev.erl +++ b/src/rt_cs_dev.erl @@ -281,7 +281,7 @@ deploy_nodes(NodeConfig) -> [ok = rt:wait_until_registered(N, riak_core_ring_manager) || N <- Nodes], %% Ensure nodes are singleton clusters - [ok = rt:check_singleton_node(?DEV(N)) || {N, Version} <- VersionMap, + [ok = rt_ring:check_singleton_node(?DEV(N)) || {N, Version} <- VersionMap, Version /= "0.14.2"], lager:info("Deployed nodes: ~p", [Nodes]), diff --git a/src/rt_ring.erl b/src/rt_ring.erl new file mode 100644 index 000000000..25b70eaa4 --- /dev/null +++ b/src/rt_ring.erl @@ -0,0 +1,114 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2013-2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_ring). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-export([assert_nodes_agree_about_ownership/1, + check_singleton_node/1, + claimant_according_to/1, + get_ring/1, + members_according_to/1, + nearest_ringsize/1, + owners_according_to/1, + partitions_for_node/1, + status_of_according_to/2]). + +%% @doc Ensure that the specified node is a singleton node/cluster -- a node +%% that owns 100% of the ring. 
+check_singleton_node(Node) -> + lager:info("Check ~p is a singleton", [Node]), + {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), + Owners = lists:usort([Owner || {_Idx, Owner} <- riak_core_ring:all_owners(Ring)]), + ?assertEqual([Node], Owners), + ok. + +% @doc Get list of partitions owned by node (primary). +partitions_for_node(Node) -> + Ring = get_ring(Node), + [Idx || {Idx, Owner} <- riak_core_ring:all_owners(Ring), Owner == Node]. + +%% @doc Get the raw ring for `Node'. +get_ring(Node) -> + {ok, Ring} = rpc:call(Node, riak_core_ring_manager, get_raw_ring, []), + Ring. + +assert_nodes_agree_about_ownership(Nodes) -> + ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), + ?assertEqual(ok, rt:wait_until_all_members(Nodes)), + [ ?assertEqual({Node, Nodes}, {Node, owners_according_to(Node)}) || Node <- Nodes]. + +%% @doc Return a list of nodes that own partitions according to the ring +%% retrieved from the specified node. +owners_according_to(Node) -> + case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of + {ok, Ring} -> + Owners = [Owner || {_Idx, Owner} <- riak_core_ring:all_owners(Ring)], + lists:usort(Owners); + {badrpc, _}=BadRpc -> + BadRpc + end. + +%% @doc Return a list of cluster members according to the ring retrieved from +%% the specified node. +members_according_to(Node) -> + case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of + {ok, Ring} -> + Members = riak_core_ring:all_members(Ring), + Members; + {badrpc, _}=BadRpc -> + BadRpc + end. + +%% @doc Return an appropriate ringsize for the node count passed +%% in. 24 is the number of cores on the bigger intel machines, but this +%% may be too large for the single-chip machines. +nearest_ringsize(Count) -> + nearest_ringsize(Count * 24, 2). + +nearest_ringsize(Count, Power) -> + case Count < trunc(Power * 0.9) of + true -> + Power; + false -> + nearest_ringsize(Count, Power * 2) + end. 
+ +%% @doc Return the cluster status of `Member' according to the ring +%% retrieved from `Node'. +status_of_according_to(Member, Node) -> + case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of + {ok, Ring} -> + Status = riak_core_ring:member_status(Ring, Member), + Status; + {badrpc, _}=BadRpc -> + BadRpc + end. + +%% @doc Return the claimant according to the ring +%% retrieved from the specified node. +claimant_according_to(Node) -> + case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of + {ok, Ring} -> + Claimant = riak_core_ring:claimant(Ring), + Claimant; + {badrpc, _}=BadRpc -> + BadRpc + end. diff --git a/src/rtdev.erl b/src/rtdev.erl index d3d9bbc09..a3bd4b1aa 100644 --- a/src/rtdev.erl +++ b/src/rtdev.erl @@ -408,7 +408,7 @@ deploy_nodes(NodeConfig) -> [ok = rt:wait_until_registered(N, riak_core_ring_manager) || N <- Nodes], %% Ensure nodes are singleton clusters - [ok = rt:check_singleton_node(?DEV(N)) || {N, Version} <- VersionMap, + [ok = rt_ring:check_singleton_node(?DEV(N)) || {N, Version} <- VersionMap, Version /= "0.14.2"], lager:info("Deployed nodes: ~p", [Nodes]), diff --git a/src/rtperf.erl b/src/rtperf.erl index bba2f13d9..14fe6f43c 100644 --- a/src/rtperf.erl +++ b/src/rtperf.erl @@ -384,7 +384,7 @@ deploy_nodes(NodeConfig, Hosts) -> [ok = rt:wait_until_registered(N, riak_core_ring_manager) || N <- Nodes], %% Ensure nodes are singleton clusters - [ok = rt:check_singleton_node(N) || {N, Version} <- VersionMap, + [ok = rt_ring:check_singleton_node(N) || {N, Version} <- VersionMap, Version /= "0.14.2"], Nodes. 
diff --git a/src/rtssh.erl b/src/rtssh.erl index 1ee477c65..3d7f19372 100644 --- a/src/rtssh.erl +++ b/src/rtssh.erl @@ -196,7 +196,7 @@ deploy_nodes(NodeConfig, Hosts) -> [ok = rt:wait_until_registered(N, riak_core_ring_manager) || N <- Nodes], %% Ensure nodes are singleton clusters - [ok = rt:check_singleton_node(N) || {N, Version} <- VersionMap, + [ok = rt_ring:check_singleton_node(N) || {N, Version} <- VersionMap, Version /= "0.14.2"], Nodes. diff --git a/tests/overload.erl b/tests/overload.erl index 41f092283..cd3e60ffe 100644 --- a/tests/overload.erl +++ b/tests/overload.erl @@ -36,7 +36,7 @@ confirm() -> Nodes = rt_cluster:build_cluster(2, Config), [_Node1, Node2] = Nodes, - Ring = rt:get_ring(Node2), + Ring = rt_ring:get_ring(Node2), Hash = riak_core_util:chash_std_keyfun({?BUCKET, ?KEY}), PL = lists:sublist(riak_core_ring:preflist(Hash, Ring), 3), Victim = hd([Idx || {Idx, Node} <- PL, diff --git a/tests/partition_repair.erl b/tests/partition_repair.erl index b3223eb3a..3d21f4c84 100644 --- a/tests/partition_repair.erl +++ b/tests/partition_repair.erl @@ -94,7 +94,7 @@ confirm() -> ?assertCmd("rm -rf " ++ base_stash_path()), %% need to load the module so riak can see the fold fun rt:load_modules_on_nodes([?MODULE], Nodes), - Ring = rt:get_ring(hd(Nodes)), + Ring = rt_ring:get_ring(hd(Nodes)), Owners = riak_core_ring:all_owners(Ring), [stash_data(riak_search, Owner) || Owner <- Owners], diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index 5723f2235..4b817775f 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -521,7 +521,7 @@ validate_intercepted_fullsync(InterceptTarget, NumIndicies = length(rpc:call(InterceptTarget, riak_core_ring, my_indices, - [rt:get_ring(InterceptTarget)])), + [rt_ring:get_ring(InterceptTarget)])), lager:info("~p owns ~p indices", [InterceptTarget, NumIndicies]), diff --git a/tests/replication/repl_location_failures.erl 
b/tests/replication/repl_location_failures.erl index 7cc5ce186..7c8178dff 100644 --- a/tests/replication/repl_location_failures.erl +++ b/tests/replication/repl_location_failures.erl @@ -81,7 +81,7 @@ confirm() -> BIndicies = length(rpc:call(LeaderB, riak_core_ring, my_indices, - [rt:get_ring(LeaderB)])), + [rt_ring:get_ring(LeaderB)])), lager:warning("BIndicies: ~p", [BIndicies]), diff --git a/tests/verify_bitcask_tombstone2_upgrade.erl b/tests/verify_bitcask_tombstone2_upgrade.erl index 5a999bcc4..fe153eb7f 100644 --- a/tests/verify_bitcask_tombstone2_upgrade.erl +++ b/tests/verify_bitcask_tombstone2_upgrade.erl @@ -43,7 +43,7 @@ list_bitcask_files(Nodes) -> list_node_bitcask_files(Node) -> % Gather partitions owned, list *.bitcask.data on each. - Partitions = rt:partitions_for_node(Node), + Partitions = rt_ring:partitions_for_node(Node), {ok, DataDir} = rt:rpc_get_env(Node, [{bitcask, data_root}]), [begin IdxStr = integer_to_list(Idx), diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl index 9e61ce599..7983abc69 100644 --- a/tests/verify_staged_clustering.erl +++ b/tests/verify_staged_clustering.erl @@ -47,7 +47,7 @@ confirm() -> lager:info("Ensure that ~p now own all partitions", [Nodes123]), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes123)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), - rt:assert_nodes_agree_about_ownership(Nodes123), + rt_ring:assert_nodes_agree_about_ownership(Nodes123), lager:info("Join ~p to the cluster", [Node4]), stage_join(Node4, Node1), @@ -64,7 +64,7 @@ confirm() -> lager:info("Ensure that ~p now own all partitions", [Nodes134]), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes134)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes134)), - rt:assert_nodes_agree_about_ownership(Nodes134), + rt_ring:assert_nodes_agree_about_ownership(Nodes134), lager:info("Verify that ~p shutdown after being replaced", [Node2]), ?assertEqual(ok, rt:wait_until_unpingable(Node2)), @@ -85,7 
+85,7 @@ confirm() -> lager:info("Ensure that ~p now own all partitions", [Nodes124]), ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes124)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes124)), - rt:assert_nodes_agree_about_ownership(Nodes124), + rt_ring:assert_nodes_agree_about_ownership(Nodes124), lager:info("Stage leave of ~p", [Node2]), stage_leave(Node1, Node2), From 77daf7ff4f443d366c211e17adfb4b41eccd08e9 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 18:32:55 -0400 Subject: [PATCH 11/17] Move command-line oriented rt functions to rt_cmd_line. --- src/rt.erl | 60 -------------- src/rt_cmd_line.erl | 78 +++++++++++++++++++ tests/basic_command_line.erl | 28 +++---- tests/replication/repl_fs_stat_caching.erl | 2 +- .../replication2_console_tests.erl | 2 +- tests/riak_admin_console_tests.erl | 4 +- tests/riaknostic_rt.erl | 10 +-- tests/verify_2i_aae.erl | 4 +- tests/verify_backup_restore.erl | 6 +- tests/verify_staged_clustering.erl | 16 ++-- 10 files changed, 114 insertions(+), 96 deletions(-) create mode 100644 src/rt_cmd_line.erl diff --git a/src/rt.erl b/src/rt.erl index c11aef49f..a57f310bf 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -29,16 +29,12 @@ -compile(export_all). -export([ - admin/2, - attach/2, - attach_direct/2, capability/2, capability/3, check_ibrowse/0, cmd/1, cmd/2, connection_info/1, - console/2, create_and_activate_bucket_type/3, enable_search_hook/2, expect_in_log/2, @@ -55,15 +51,12 @@ pmap/2, post_result/2, priv_dir/0, - riak/2, - riak_repl/2, rpc_get_env/2, setup_harness/2, setup_log_capture/1, stream_cmd/1, stream_cmd/2, spawn_cmd/1, spawn_cmd/2, - search_cmd/2, str/2, systest_read/2, systest_read/3, @@ -819,59 +812,6 @@ get_replica(Node, Bucket, Key, I, N) -> ?assert(false) end. 
-%%%=================================================================== -%%% PBC & HTTPC Functions -%%%=================================================================== - - - -%%%=================================================================== -%%% Command Line Functions -%%%=================================================================== - -%% @doc Call 'bin/riak-admin' command on `Node' with arguments `Args' -admin(Node, Args) -> - ?HARNESS:admin(Node, Args). - -%% @doc Call 'bin/riak' command on `Node' with arguments `Args' -riak(Node, Args) -> - ?HARNESS:riak(Node, Args). - - -%% @doc Call 'bin/riak-repl' command on `Node' with arguments `Args' -riak_repl(Node, Args) -> - ?HARNESS:riak_repl(Node, Args). - -search_cmd(Node, Args) -> - {ok, Cwd} = file:get_cwd(), - rpc:call(Node, riak_search_cmd, command, [[Cwd | Args]]). - -%% @doc Runs `riak attach' on a specific node, and tests for the expected behavoir. -%% Here's an example: ``` -%% rt:attach(Node, [{expect, "erlang.pipe.1 \(^D to exit\)"}, -%% {send, "riak_core_ring_manager:get_my_ring()."}, -%% {expect, "dict,"}, -%% {send, [4]}]), %% 4 = Ctrl + D''' -%% `{expect, String}' scans the output for the existance of the String. -%% These tuples are processed in order. -%% -%% `{send, String}' sends the string to the console. -%% Once a send is encountered, the buffer is discarded, and the next -%% expect will process based on the output following the sent data. -%% -attach(Node, Expected) -> - ?HARNESS:attach(Node, Expected). - -%% @doc Runs 'riak attach-direct' on a specific node -%% @see rt:attach/2 -attach_direct(Node, Expected) -> - ?HARNESS:attach_direct(Node, Expected). - -%% @doc Runs `riak console' on a specific node -%% @see rt:attach/2 -console(Node, Expected) -> - ?HARNESS:console(Node, Expected). 
- %%%=================================================================== %%% Search %%%=================================================================== diff --git a/src/rt_cmd_line.erl b/src/rt_cmd_line.erl new file mode 100644 index 000000000..d4022f1ae --- /dev/null +++ b/src/rt_cmd_line.erl @@ -0,0 +1,78 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(rt_cmd_line). +-include_lib("eunit/include/eunit.hrl"). + +-export([admin/2, + riak/2, + riak_repl/2, + search_cmd/2, + attach/2, + attach_direct/2, + console/2 + ]). + +-include("rt.hrl"). + +-define(HARNESS, (rt_config:get(rt_harness))). + +%% @doc Call 'bin/riak-admin' command on `Node' with arguments `Args' +admin(Node, Args) -> + ?HARNESS:admin(Node, Args). + +%% @doc Call 'bin/riak' command on `Node' with arguments `Args' +riak(Node, Args) -> + ?HARNESS:riak(Node, Args). + + +%% @doc Call 'bin/riak-repl' command on `Node' with arguments `Args' +riak_repl(Node, Args) -> + ?HARNESS:riak_repl(Node, Args). + +search_cmd(Node, Args) -> + {ok, Cwd} = file:get_cwd(), + rpc:call(Node, riak_search_cmd, command, [[Cwd | Args]]). + +%% @doc Runs `riak attach' on a specific node, and tests for the expected behavior. 
+%% Here's an example: ``` +%% rt_cmd_line:attach(Node, [{expect, "erlang.pipe.1 \(^D to exit\)"}, +%% {send, "riak_core_ring_manager:get_my_ring()."}, +%% {expect, "dict,"}, +%% {send, [4]}]), %% 4 = Ctrl + D''' +%% `{expect, String}' scans the output for the existence of the String. +%% These tuples are processed in order. +%% +%% `{send, String}' sends the string to the console. +%% Once a send is encountered, the buffer is discarded, and the next +%% expect will process based on the output following the sent data. +%% +attach(Node, Expected) -> + ?HARNESS:attach(Node, Expected). + +%% @doc Runs 'riak attach-direct' on a specific node +%% @see rt_cmd_line:attach/2 +attach_direct(Node, Expected) -> + ?HARNESS:attach_direct(Node, Expected). + +%% @doc Runs `riak console' on a specific node +%% @see rt_cmd_line:attach/2 +console(Node, Expected) -> + ?HARNESS:console(Node, Expected). diff --git a/tests/basic_command_line.erl b/tests/basic_command_line.erl index 398876c32..eda201190 100644 --- a/tests/basic_command_line.erl +++ b/tests/basic_command_line.erl @@ -56,7 +56,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> console_up_test(Node) -> lager:info("Node is already up, `riak console` should fail"), - {ok, ConsoleFail} = rt:riak(Node, ["console"]), + {ok, ConsoleFail} = rt_cmd_line:riak(Node, ["console"]), ?assert(rt:str(ConsoleFail, "Node is already running")), ok. 
@@ -65,7 +65,7 @@ console_test(Node) -> lager:info("Testing riak console on ~s", [Node]), %% Stop node, to test console working - rt:console(Node, [{expect, "\(abort with ^G\)"}, + rt_cmd_line:console(Node, [{expect, "\(abort with ^G\)"}, {send, "riak_core_ring_manager:get_my_ring()."}, {expect, "dict,"}, {send, "q()."}, @@ -76,7 +76,7 @@ console_test(Node) -> start_up_test(Node) -> %% Try starting again and check you get the node is already running message lager:info("Testing riak start now will return 'already running'"), - {ok, StartOut} = rt:riak(Node, ["start"]), + {ok, StartOut} = rt_cmd_line:riak(Node, ["start"]), ?assert(rt:str(StartOut, "Node is already running!")), ok. @@ -85,7 +85,7 @@ start_test(Node) -> %% Test starting with /bin/riak start lager:info("Testing riak start works on ~s", [Node]), - {ok, StartPass} = rt:riak(Node, ["start"]), + {ok, StartPass} = rt_cmd_line:riak(Node, ["start"]), ?assertMatch(StartPass, ""), rt:stop_and_wait(Node), ok. @@ -93,7 +93,7 @@ start_test(Node) -> stop_test(Node) -> ?assert(rt:is_pingable(Node)), - {ok, "ok\n"} = rt:riak(Node, "stop"), + {ok, "ok\n"} = rt_cmd_line:riak(Node, "stop"), ?assertNot(rt:is_pingable(Node)), ok. @@ -106,27 +106,27 @@ ping_up_test(Node) -> %% ping / pong %% rt_node:start_and_wait(Node), lager:info("Node up, should ping"), - {ok, PongOut} = rt:riak(Node, ["ping"]), + {ok, PongOut} = rt_cmd_line:riak(Node, ["ping"]), ?assert(rt:str(PongOut, "pong")), ok. ping_down_test(Node) -> %% ping / pang lager:info("Node down, should pang"), - {ok, PangOut} = rt:riak(Node, ["ping"]), + {ok, PangOut} = rt_cmd_line:riak(Node, ["ping"]), ?assert(rt:str(PangOut, "not responding to pings")), ok. attach_down_test(Node) -> lager:info("Testing riak attach while down"), - {ok, AttachOut} = rt:riak(Node, ["attach"]), + {ok, AttachOut} = rt_cmd_line:riak(Node, ["attach"]), ?assert(rt:str(AttachOut, "Node is not running!")), ok. 
attach_direct_up_test(Node) -> lager:info("Testing riak attach-direct"), - rt:attach_direct(Node, [{expect, "\(^D to exit\)"}, + rt_cmd_line:attach_direct(Node, [{expect, "\(^D to exit\)"}, {send, "riak_core_ring_manager:get_my_ring()."}, {expect, "dict,"}, {send, [4]}]), %% 4 = Ctrl + D @@ -134,14 +134,14 @@ attach_direct_up_test(Node) -> attach_direct_down_test(Node) -> lager:info("Testing riak attach-direct while down"), - {ok, AttachOut} = rt:riak(Node, ["attach-direct"]), + {ok, AttachOut} = rt_cmd_line:riak(Node, ["attach-direct"]), ?assert(rt:str(AttachOut, "Node is not running!")), ok. status_up_test(Node) -> lager:info("Test riak-admin status on ~s", [Node]), - {ok, StatusOut} = rt:admin(Node, ["status"]), + {ok, StatusOut} = rt_cmd_line:admin(Node, ["status"]), io:format("Result of status: ~s", [StatusOut]), ?assert(rt:str(StatusOut, "1-minute stats")), @@ -151,19 +151,19 @@ status_up_test(Node) -> status_down_test(Node) -> lager:info("Test riak-admin status while down"), - {ok, StatusOut} = rt:admin(Node, ["status"]), + {ok, StatusOut} = rt_cmd_line:admin(Node, ["status"]), ?assert(rt:str(StatusOut, "Node is not running!")), ok. getpid_up_test(Node) -> lager:info("Test riak getpid on ~s", [Node]), - {ok, PidOut} = rt:riak(Node, ["getpid"]), + {ok, PidOut} = rt_cmd_line:riak(Node, ["getpid"]), ?assertNot(rt:str(PidOut, "")), ?assert(rt:str(PidOut, rpc:call(Node, os, getpid, []))), ok. getpid_down_test(Node) -> lager:info("Test riak getpid fails on ~s", [Node]), - {ok, PidOut} = rt:riak(Node, ["getpid"]), + {ok, PidOut} = rt_cmd_line:riak(Node, ["getpid"]), ?assert(rt:str(PidOut, "Node is not running!")), ok. diff --git a/tests/replication/repl_fs_stat_caching.erl b/tests/replication/repl_fs_stat_caching.erl index 6ecb655f0..ce03cd7bd 100644 --- a/tests/replication/repl_fs_stat_caching.erl +++ b/tests/replication/repl_fs_stat_caching.erl @@ -25,7 +25,7 @@ confirm() -> % status. 
{ok, Suspended} = suspend_an_fs_source(SrcCluster), lager:info("Suspended: ~p", [Suspended]), - {ok, Status} = rt:riak_repl(SrcLead, "status"), + {ok, Status} = rt_cmd_line:riak_repl(SrcLead, "status"), FailLine = "RPC to '" ++ atom_to_list(SrcLead) ++ "' failed: timeout\n", ?assertNotEqual(FailLine, Status), diff --git a/tests/replication/replication2_console_tests.erl b/tests/replication/replication2_console_tests.erl index c090bf9a1..5d5c55fae 100644 --- a/tests/replication/replication2_console_tests.erl +++ b/tests/replication/replication2_console_tests.erl @@ -118,6 +118,6 @@ confirm() -> check_cmd(Node, Cmd) -> lager:info("Testing riak-repl ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:riak_repl(Node, [Cmd]), + {ok, Out} = rt_cmd_line:riak_repl(Node, [Cmd]), ?assertEqual("pass", Out). diff --git a/tests/riak_admin_console_tests.erl b/tests/riak_admin_console_tests.erl index 04bd8ea48..95ad3b421 100644 --- a/tests/riak_admin_console_tests.erl +++ b/tests/riak_admin_console_tests.erl @@ -233,7 +233,7 @@ confirm() -> check_admin_cmd(Node, Cmd) -> S = string:tokens(Cmd, " "), lager:info("Testing riak-admin ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:admin(Node, S), + {ok, Out} = rt_cmd_line:admin(Node, S), ?assertEqual("pass", Out). %% Recently we've started calling riak_core_console twice from the @@ -242,5 +242,5 @@ check_admin_cmd(Node, Cmd) -> check_admin_cmd_2x(Node, Cmd) -> S = string:tokens(Cmd, " "), lager:info("Testing riak-admin ~s on ~s", [Cmd, Node]), - {ok, Out} = rt:admin(Node, S), + {ok, Out} = rt_cmd_line:admin(Node, S), ?assertEqual("passpass", Out). 
diff --git a/tests/riaknostic_rt.erl b/tests/riaknostic_rt.erl index 43fa03449..9193b3bf2 100644 --- a/tests/riaknostic_rt.erl +++ b/tests/riaknostic_rt.erl @@ -47,7 +47,7 @@ confirm() -> riaknostic_bootstrap(Node) -> lager:info("Check if riaknostic is installed"), - {ok, RiaknosticOut1} = rt:admin(Node, ["diag"]), + {ok, RiaknosticOut1} = rt_cmd_line:admin(Node, ["diag"]), riaknostic_install((rt:str(RiaknosticOut1, "is not present!")), Node). %% riaknostic is already installed, move along @@ -69,7 +69,7 @@ riaknostic_install(true, Node) -> check_riaknostic_execute(Node) -> %% Execute lager:info("** Check Riaknostic executes"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag"]), + {ok, RiaknosticOut} = rt_cmd_line:admin(Node, ["diag"]), ?assertNot(rt:str(RiaknosticOut, "is not present!")), ?assertNot(rt:str(RiaknosticOut, "[debug]")), ok. @@ -78,7 +78,7 @@ check_riaknostic_execute(Node) -> check_riaknostic_usage(Node) -> %% Check usage message lager:info("** Run Riaknostic usage message"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--help"]), + {ok, RiaknosticOut} = rt_cmd_line:admin(Node, ["diag", "--help"]), ?assert(rt:str(RiaknosticOut, "Usage: riak-admin")), ok. 
@@ -86,7 +86,7 @@ check_riaknostic_usage(Node) -> check_riaknostic_command_list(Node) -> %% Check commands list lager:info("** Run Riaknostic commands list message"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--list"]), + {ok, RiaknosticOut} = rt_cmd_line:admin(Node, ["diag", "--list"]), ?assert(rt:str(RiaknosticOut, "Available diagnostic checks")), ?assert(rt:str(RiaknosticOut, " disk ")), ?assert(rt:str(RiaknosticOut, " dumps ")), @@ -102,7 +102,7 @@ check_riaknostic_command_list(Node) -> check_riaknostic_log_levels(Node) -> %% Check log levels lager:info("** Run Riaknostic with a different log level"), - {ok, RiaknosticOut} = rt:admin(Node, ["diag", "--level", "debug"]), + {ok, RiaknosticOut} = rt_cmd_line:admin(Node, ["diag", "--level", "debug"]), ?assert(rt:str(RiaknosticOut, "[debug]")), ok. diff --git a/tests/verify_2i_aae.erl b/tests/verify_2i_aae.erl index 5ac070c36..a2714e773 100644 --- a/tests/verify_2i_aae.erl +++ b/tests/verify_2i_aae.erl @@ -151,7 +151,7 @@ check_kill_repair(Node1) -> lager:info("Test that killing 2i repair works as desired"), spawn(fun() -> timer:sleep(1500), - rt:admin(Node1, ["repair-2i", "kill"]) + rt_cmd_line:admin(Node1, ["repair-2i", "kill"]) end), ExitStatus = run_2i_repair(Node1), case ExitStatus of @@ -168,7 +168,7 @@ check_kill_repair(Node1) -> run_2i_repair(Node1) -> lager:info("Run 2i AAE repair"), - ?assertMatch({ok, _}, rt:admin(Node1, ["repair-2i"])), + ?assertMatch({ok, _}, rt_cmd_line:admin(Node1, ["repair-2i"])), RepairPid = rpc:call(Node1, erlang, whereis, [riak_kv_2i_aae]), lager:info("Wait for repair process to finish"), Mon = monitor(process, RepairPid), diff --git a/tests/verify_backup_restore.erl b/tests/verify_backup_restore.erl index 5a94e17ff..95272ff59 100644 --- a/tests/verify_backup_restore.erl +++ b/tests/verify_backup_restore.erl @@ -82,7 +82,7 @@ confirm() -> lager:info("Backing up the data to ~p", [BackupFile]), Cookie = "riak", - rt:admin(Node0, ["backup", atom_to_list(Node0), Cookie, 
BackupFile, "all"]), + rt_cmd_line:admin(Node0, ["backup", atom_to_list(Node0), Cookie, BackupFile, "all"]), lager:info("Modifying data on cluster"), ModF = fun(N) -> @@ -114,7 +114,7 @@ confirm() -> verify_searches(PbcPid, Searches, 0), lager:info("Restoring from backup ~p", [BackupFile]), - rt:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]), + rt_cmd_line:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]), rt:wait_until_no_pending_changes(Nodes), %% When allow_mult=false, the mods overwrite the restored data. When @@ -150,7 +150,7 @@ confirm() -> verify_searches(PbcPid2, EmptySearches, 0), lager:info("Restoring from backup ~p again", [BackupFile]), - rt:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]), + rt_cmd_line:admin(Node0, ["restore", atom_to_list(Node0), Cookie, BackupFile]), rt:enable_search_hook(Node0, ?SEARCH_BUCKET), lager:info("Verifying data is back to original backup"), diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl index 7983abc69..deda5b906 100644 --- a/tests/verify_staged_clustering.erl +++ b/tests/verify_staged_clustering.erl @@ -117,28 +117,28 @@ n(Atom) -> stage_join(Node, OtherNode) -> %% rpc:call(Node, riak_kv_console, staged_join, [[n(OtherNode)]]). - rt:admin(Node, ["cluster", "join", n(OtherNode)]). + rt_cmd_line:admin(Node, ["cluster", "join", n(OtherNode)]). stage_leave(Node, OtherNode) -> %% rpc:call(Node, riak_core_console, stage_leave, [[n(OtherNode)]]). - rt:admin(Node, ["cluster", "leave", n(OtherNode)]). + rt_cmd_line:admin(Node, ["cluster", "leave", n(OtherNode)]). stage_remove(Node, OtherNode) -> %% rpc:call(Node, riak_core_console, stage_remove, [[n(OtherNode)]]). - rt:admin(Node, ["cluster", "force-remove", n(OtherNode)]). + rt_cmd_line:admin(Node, ["cluster", "force-remove", n(OtherNode)]). stage_replace(Node, Node1, Node2) -> %% rpc:call(Node, riak_core_console, stage_replace, [[n(Node1), n(Node2)]]). 
- rt:admin(Node, ["cluster", "replace", n(Node1), n(Node2)]). + rt_cmd_line:admin(Node, ["cluster", "replace", n(Node1), n(Node2)]). stage_force_replace(Node, Node1, Node2) -> %% rpc:call(Node, riak_core_console, stage_force_replace, [[n(Node1), n(Node2)]]). - rt:admin(Node, ["cluster", "force-replace", n(Node1), n(Node2)]). + rt_cmd_line:admin(Node, ["cluster", "force-replace", n(Node1), n(Node2)]). print_staged(Node) -> %% rpc:call(Node, riak_core_console, print_staged, [[]]). F = fun(_) -> - {ok, StdOut} = rt:admin(Node, ["cluster", "plan"]), + {ok, StdOut} = rt_cmd_line:admin(Node, ["cluster", "plan"]), case StdOut of "Cannot" ++ _X -> false; _ -> true @@ -148,11 +148,11 @@ print_staged(Node) -> commit_staged(Node) -> %% rpc:call(Node, riak_core_console, commit_staged, [[]]). - rt:admin(Node, ["cluster", "commit"]). + rt_cmd_line:admin(Node, ["cluster", "commit"]). clear_staged(Node) -> %% rpc:call(Node, riak_core_console, clear_staged, [[]]). - rt:admin(Node, ["cluster", "clear"]). + rt_cmd_line:admin(Node, ["cluster", "clear"]). stage_join_rpc(Node, OtherNode) -> rpc:call(Node, riak_core, staged_join, [OtherNode]). From b24cad7badfbd3414617b7af13ad3777d089f6e1 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 22:18:38 -0400 Subject: [PATCH 12/17] Move bucket types related functions from rt module to rt_bucket_types. 
--- src/rt.erl | 66 -------------- src/rt_bucket_types.erl | 88 +++++++++++++++++++ tests/bucket_types.erl | 8 +- tests/ensemble_basic3.erl | 2 +- tests/ensemble_basic4.erl | 2 +- tests/ensemble_interleave.erl | 2 +- tests/ensemble_remove_node2.erl | 2 +- tests/ensemble_sync.erl | 2 +- tests/ensemble_vnode_crash.erl | 2 +- tests/http_bucket_types.erl | 6 +- tests/http_security.erl | 8 +- tests/mapred_basic_compat.erl | 6 +- tests/pb_security.erl | 18 ++-- tests/replication/repl_bucket_types.erl | 18 ++-- .../repl_consistent_object_filter.erl | 12 +-- tests/verify_conditional_postcommit.erl | 4 +- tests/verify_dt_context.erl | 2 +- tests/verify_handoff.erl | 2 +- tests/verify_listkeys_eqcfsm.erl | 6 +- tests/yz_ensemble.erl | 2 +- 20 files changed, 140 insertions(+), 118 deletions(-) create mode 100644 src/rt_bucket_types.erl diff --git a/src/rt.erl b/src/rt.erl index a57f310bf..b5ede7d8e 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -35,7 +35,6 @@ cmd/1, cmd/2, connection_info/1, - create_and_activate_bucket_type/3, enable_search_hook/2, expect_in_log/2, get_deps/0, @@ -77,8 +76,6 @@ wait_until_aae_trees_built/1, wait_until_all_members/1, wait_until_all_members/2, - wait_until_bucket_props/3, - wait_until_bucket_type_visible/2, wait_until_capability/3, wait_until_capability/4, wait_until_connected/1, @@ -94,7 +91,6 @@ wait_until_status_ready/1, wait_until_transfers_complete/1, wait_until_unpingable/1, - wait_until_bucket_type_status/3, whats_up/0 ]). @@ -634,10 +630,6 @@ index_built_fun(Node) -> end end. 
-%%%=================================================================== -%%% Ring Functions -%%%=================================================================== - %%%=================================================================== %%% Basic Read/Write Functions %%%=================================================================== @@ -928,64 +920,6 @@ post_result(TestResult, #rt_webhook{url=URL, headers=HookHeaders, name=Name}) -> %%% Bucket Types Functions %%%=================================================================== -%% @doc create and immediately activate a bucket type -create_and_activate_bucket_type(Node, Type, Props) -> - ok = rpc:call(Node, riak_core_bucket_type, create, [Type, Props]), - wait_until_bucket_type_status(Type, ready, Node), - ok = rpc:call(Node, riak_core_bucket_type, activate, [Type]), - wait_until_bucket_type_status(Type, active, Node). - -wait_until_bucket_type_status(Type, ExpectedStatus, Nodes) when is_list(Nodes) -> - [wait_until_bucket_type_status(Type, ExpectedStatus, Node) || Node <- Nodes]; -wait_until_bucket_type_status(Type, ExpectedStatus, Node) -> - F = fun() -> - ActualStatus = rpc:call(Node, riak_core_bucket_type, status, [Type]), - ExpectedStatus =:= ActualStatus - end, - ?assertEqual(ok, rt:wait_until(F)). - --spec bucket_type_visible([atom()], binary()|{binary(), binary()}) -> boolean(). -bucket_type_visible(Nodes, Type) -> - MaxTime = rt_config:get(rt_max_wait_time), - IsVisible = fun erlang:is_list/1, - {Res, NodesDown} = rpc:multicall(Nodes, riak_core_bucket_type, get, [Type], MaxTime), - NodesDown == [] andalso lists:all(IsVisible, Res). - -wait_until_bucket_type_visible(Nodes, Type) -> - F = fun() -> bucket_type_visible(Nodes, Type) end, - ?assertEqual(ok, rt:wait_until(F)). - --spec see_bucket_props([atom()], binary()|{binary(), binary()}, - proplists:proplist()) -> boolean(). 
-see_bucket_props(Nodes, Bucket, ExpectProps) -> - MaxTime = rt_config:get(rt_max_wait_time), - IsBad = fun({badrpc, _}) -> true; - ({error, _}) -> true; - (Res) when is_list(Res) -> false - end, - HasProps = fun(ResProps) -> - lists:all(fun(P) -> lists:member(P, ResProps) end, - ExpectProps) - end, - case rpc:multicall(Nodes, riak_core_bucket, get_bucket, [Bucket], MaxTime) of - {Res, []} -> - % No nodes down, check no errors - case lists:any(IsBad, Res) of - true -> - false; - false -> - lists:all(HasProps, Res) - end; - {_, _NodesDown} -> - false - end. - -wait_until_bucket_props(Nodes, Bucket, Props) -> - F = fun() -> - see_bucket_props(Nodes, Bucket, Props) - end, - ?assertEqual(ok, rt:wait_until(F)). - %% @doc Set up in memory log capture to check contents in a test. setup_log_capture(Nodes) when is_list(Nodes) -> diff --git a/src/rt_bucket_types.erl b/src/rt_bucket_types.erl new file mode 100644 index 000000000..1443283fb --- /dev/null +++ b/src/rt_bucket_types.erl @@ -0,0 +1,88 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(rt_bucket_types). +-include_lib("eunit/include/eunit.hrl"). 
+ +-export([create_and_activate_bucket_type/3, + wait_until_bucket_type_visible/2, + wait_until_bucket_type_status/3, + wait_until_bucket_props/3]). + +-include("rt.hrl"). + +%% @doc create and immediately activate a bucket type +create_and_activate_bucket_type(Node, Type, Props) -> + ok = rpc:call(Node, riak_core_bucket_type, create, [Type, Props]), + wait_until_bucket_type_status(Type, ready, Node), + ok = rpc:call(Node, riak_core_bucket_type, activate, [Type]), + wait_until_bucket_type_status(Type, active, Node). + +wait_until_bucket_type_status(Type, ExpectedStatus, Nodes) when is_list(Nodes) -> + [wait_until_bucket_type_status(Type, ExpectedStatus, Node) || Node <- Nodes]; +wait_until_bucket_type_status(Type, ExpectedStatus, Node) -> + F = fun() -> + ActualStatus = rpc:call(Node, riak_core_bucket_type, status, [Type]), + ExpectedStatus =:= ActualStatus + end, + ?assertEqual(ok, rt:wait_until(F)). + +-spec bucket_type_visible([atom()], binary()|{binary(), binary()}) -> boolean(). +bucket_type_visible(Nodes, Type) -> + MaxTime = rt_config:get(rt_max_wait_time), + IsVisible = fun erlang:is_list/1, + {Res, NodesDown} = rpc:multicall(Nodes, riak_core_bucket_type, get, [Type], MaxTime), + NodesDown == [] andalso lists:all(IsVisible, Res). + +wait_until_bucket_type_visible(Nodes, Type) -> + F = fun() -> bucket_type_visible(Nodes, Type) end, + ?assertEqual(ok, rt:wait_until(F)). + +-spec see_bucket_props([atom()], binary()|{binary(), binary()}, + proplists:proplist()) -> boolean(). 
+see_bucket_props(Nodes, Bucket, ExpectProps) -> + MaxTime = rt_config:get(rt_max_wait_time), + IsBad = fun({badrpc, _}) -> true; + ({error, _}) -> true; + (Res) when is_list(Res) -> false + end, + HasProps = fun(ResProps) -> + lists:all(fun(P) -> lists:member(P, ResProps) end, + ExpectProps) + end, + case rpc:multicall(Nodes, riak_core_bucket, get_bucket, [Bucket], MaxTime) of + {Res, []} -> + % No nodes down, check no errors + case lists:any(IsBad, Res) of + true -> + false; + false -> + lists:all(HasProps, Res) + end; + {_, _NodesDown} -> + false + end. + +wait_until_bucket_props(Nodes, Bucket, Props) -> + F = fun() -> + see_bucket_props(Nodes, Bucket, Props) + end, + ?assertEqual(ok, rt:wait_until(F)). + diff --git a/tests/bucket_types.erl b/tests/bucket_types.erl index c08538188..118c96323 100644 --- a/tests/bucket_types.erl +++ b/tests/bucket_types.erl @@ -105,8 +105,8 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> lager:info("custom type get/put test"), Type = <<"mytype">>, - rt:create_and_activate_bucket_type(Node, Type, [{n_val, 3}]), - rt:wait_until_bucket_type_status(Type, active, Nodes), + rt_bucket_types:create_and_activate_bucket_type(Node, Type, [{n_val, 3}]), + rt_bucket_types:wait_until_bucket_type_status(Type, active, Nodes), lager:info("doing put"), riakc_pb_socket:put(PB, riakc_obj:new({Type, <<"bucket">>}, @@ -195,8 +195,8 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> %% make sure a newly created type is not affected either %% create a new type Type2 = <<"mynewtype">>, - rt:create_and_activate_bucket_type(Node, Type2, []), - rt:wait_until_bucket_type_status(Type2, active, Nodes), + rt_bucket_types:create_and_activate_bucket_type(Node, Type2, []), + rt_bucket_types:wait_until_bucket_type_status(Type2, active, Nodes), {ok, BProps11} = riakc_pb_socket:get_bucket_type(PB, Type2), diff --git a/tests/ensemble_basic3.erl b/tests/ensemble_basic3.erl index 529c561cc..f949e1fb3 100644 --- a/tests/ensemble_basic3.erl +++ b/tests/ensemble_basic3.erl @@ 
-37,7 +37,7 @@ confirm() -> ensemble_util:wait_until_stable(Node, NVal), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), Bucket = {<<"strong">>, <<"test">>}, diff --git a/tests/ensemble_basic4.erl b/tests/ensemble_basic4.erl index 2050e0eeb..9c1f0abf3 100644 --- a/tests/ensemble_basic4.erl +++ b/tests/ensemble_basic4.erl @@ -32,7 +32,7 @@ confirm() -> Node = hd(Nodes), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), Bucket = {<<"strong">>, <<"test">>}, diff --git a/tests/ensemble_interleave.erl b/tests/ensemble_interleave.erl index 5aeb78ca4..2db3da566 100644 --- a/tests/ensemble_interleave.erl +++ b/tests/ensemble_interleave.erl @@ -46,7 +46,7 @@ confirm() -> vnode_util:load(Nodes), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), Bucket = {<<"strong">>, <<"test">>}, diff --git a/tests/ensemble_remove_node2.erl b/tests/ensemble_remove_node2.erl index 4477de2c6..f5232ed05 100644 --- a/tests/ensemble_remove_node2.erl +++ b/tests/ensemble_remove_node2.erl @@ -39,7 +39,7 @@ confirm() -> {ok, _} = riak_ensemble_client:kget(Node, root, testerooni, 1000), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), Bucket = 
{<<"strong">>, <<"test">>}, diff --git a/tests/ensemble_sync.erl b/tests/ensemble_sync.erl index 5128a0ae5..9142e3616 100644 --- a/tests/ensemble_sync.erl +++ b/tests/ensemble_sync.erl @@ -30,7 +30,7 @@ confirm() -> vnode_util:load(Nodes), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), diff --git a/tests/ensemble_vnode_crash.erl b/tests/ensemble_vnode_crash.erl index 7bd59bdf0..35e666ebf 100644 --- a/tests/ensemble_vnode_crash.erl +++ b/tests/ensemble_vnode_crash.erl @@ -35,7 +35,7 @@ confirm() -> ensemble_util:wait_until_stable(Node, NVal), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), ensemble_util:wait_until_stable(Node, NVal), Bucket = {<<"strong">>, <<"test">>}, diff --git a/tests/http_bucket_types.erl b/tests/http_bucket_types.erl index e379246e4..8b3d147c1 100644 --- a/tests/http_bucket_types.erl +++ b/tests/http_bucket_types.erl @@ -107,7 +107,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> lager:info("custom type get/put test"), %% create a new type - ok = rt:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val,3}]), + ok = rt_bucket_types:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val,3}]), %% allow cluster metadata some time to propogate timer:sleep(1000), @@ -142,7 +142,7 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> UCBBin = {UnicodeTypeBin, UnicodeBucketBin}, - ok = rt:create_and_activate_bucket_type(Node, UnicodeTypeBin, [{n_val,3}]), + ok = rt_bucket_types:create_and_activate_bucket_type(Node, UnicodeTypeBin, [{n_val,3}]), lager:info("doing put"), ok = rhc:put(RHC, riakc_obj:new(UCBBin, @@ -231,7 +231,7 @@ 
confirm(#rt_properties{nodes=Nodes}, _MD) -> %% make sure a newly created type is not affected either %% create a new type - ok = rt:create_and_activate_bucket_type(Node, <<"mynewtype">>, []), + ok = rt_bucket_types:create_and_activate_bucket_type(Node, <<"mynewtype">>, []), %% allow cluster metadata some time to propogate timer:sleep(1000), diff --git a/tests/http_security.erl b/tests/http_security.erl index d7b4dcc6b..51c3083c5 100644 --- a/tests/http_security.erl +++ b/tests/http_security.erl @@ -246,7 +246,7 @@ confirm() -> "default", "from", Username]]), %% list keys with bucket type - rt:create_and_activate_bucket_type(Node, <<"list-keys-test">>, []), + rt_bucket_types:create_and_activate_bucket_type(Node, <<"list-keys-test">>, []), lager:info("Checking that list keys on a bucket-type is disallowed"), ?assertMatch({error, {"403", _}}, rhc:list_keys(C7, {<<"list-keys-test">>, <<"hello">>})), @@ -542,9 +542,9 @@ crdt_tests([Node|_]=Nodes, RHC) -> Types = [{<<"counters">>, counter, riakc_counter:to_op(riakc_counter:increment(5, riakc_counter:new()))}, {<<"sets">>, set, riakc_set:to_op(riakc_set:add_element(<<"foo">>, riakc_set:new()))}], [ begin - rt:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), - rt:wait_until_bucket_type_status(BType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BType) + rt_bucket_types:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), + rt_bucket_types:wait_until_bucket_type_status(BType, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, BType) end || {BType, DType, _Op} <- Types ], lager:info("Checking that CRDT fetch is denied"), diff --git a/tests/mapred_basic_compat.erl b/tests/mapred_basic_compat.erl index ee7f9b91f..35d6cba56 100644 --- a/tests/mapred_basic_compat.erl +++ b/tests/mapred_basic_compat.erl @@ -45,9 +45,9 @@ confirm() -> [Node1|_] = Nodes, %% create a new type - rt:create_and_activate_bucket_type(Node1, 
?BUCKET_TYPE, [{n_val, 3}]), - rt:wait_until_bucket_type_status(?BUCKET_TYPE, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, ?BUCKET_TYPE), + rt_bucket_types:create_and_activate_bucket_type(Node1, ?BUCKET_TYPE, [{n_val, 3}]), + rt_bucket_types:wait_until_bucket_type_status(?BUCKET_TYPE, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, ?BUCKET_TYPE), load_test_data(Nodes), rt:load_modules_on_nodes([?MODULE], Nodes), diff --git a/tests/pb_security.erl b/tests/pb_security.erl index 2562615f4..47c203869 100644 --- a/tests/pb_security.erl +++ b/tests/pb_security.erl @@ -544,9 +544,9 @@ confirm() -> %%%%%%%%%%%% %% create a new type - rt:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val, 3}]), - rt:wait_until_bucket_type_status(<<"mytype">>, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, <<"mytype">>), + rt_bucket_types:create_and_activate_bucket_type(Node, <<"mytype">>, [{n_val, 3}]), + rt_bucket_types:wait_until_bucket_type_status(<<"mytype">>, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, <<"mytype">>), lager:info("Checking that get on a new bucket type is disallowed"), ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, @@ -621,9 +621,9 @@ confirm() -> lager:info("Creating another bucket type"), %% create a new type - rt:create_and_activate_bucket_type(Node, <<"mytype2">>, [{allow_mult, true}]), - rt:wait_until_bucket_type_status(<<"mytype2">>, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, <<"mytype2">>), + rt_bucket_types:create_and_activate_bucket_type(Node, <<"mytype2">>, [{allow_mult, true}]), + rt_bucket_types:wait_until_bucket_type_status(<<"mytype2">>, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, <<"mytype2">>), lager:info("Checking that get on the new type is disallowed"), ?assertMatch({error, <<"Permission", _/binary>>}, riakc_pb_socket:get(PB, @@ -772,9 +772,9 @@ crdt_tests([Node|_]=Nodes, PB) -> {<<"sets">>, set, 
riakc_set:to_op(riakc_set:add_element(<<"foo">>, riakc_set:new()))}, {<<"maps">>, map, riakc_map:to_op(riakc_map:update({<<"bar">>, counter}, fun(In) -> riakc_counter:increment(In) end, riakc_map:new()))}], [ begin - rt:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), - rt:wait_until_bucket_type_status(BType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BType) + rt_bucket_types:create_and_activate_bucket_type(Node, BType, [{allow_mult, true}, {datatype, DType}]), + rt_bucket_types:wait_until_bucket_type_status(BType, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, BType) end || {BType, DType, _Op} <- Types ], lager:info("Checking that CRDT fetch is denied"), diff --git a/tests/replication/repl_bucket_types.erl b/tests/replication/repl_bucket_types.erl index 1a78bdd82..3aaf8d499 100644 --- a/tests/replication/repl_bucket_types.erl +++ b/tests/replication/repl_bucket_types.erl @@ -25,28 +25,28 @@ setup(Type) -> {DefinedType, UndefType} = Types = {<<"working_type">>, <<"undefined_type">>}, - rt:create_and_activate_bucket_type(LeaderA, + rt_bucket_types:create_and_activate_bucket_type(LeaderA, DefinedType, [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(DefinedType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, DefinedType), + rt_bucket_types:wait_until_bucket_type_status(DefinedType, active, ANodes), + rt_bucket_types:wait_until_bucket_type_visible(ANodes, DefinedType), case Type of current -> - rt:create_and_activate_bucket_type(LeaderB, + rt_bucket_types:create_and_activate_bucket_type(LeaderB, DefinedType, [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(DefinedType, active, BNodes), - rt:wait_until_bucket_type_visible(BNodes, DefinedType); + rt_bucket_types:wait_until_bucket_type_status(DefinedType, active, BNodes), + rt_bucket_types:wait_until_bucket_type_visible(BNodes, DefinedType); mixed -> ok end, - 
rt:create_and_activate_bucket_type(LeaderA, + rt_bucket_types:create_and_activate_bucket_type(LeaderA, UndefType, [{n_val, 3}, {allow_mult, false}]), - rt:wait_until_bucket_type_status(UndefType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, UndefType), + rt_bucket_types:wait_until_bucket_type_status(UndefType, active, ANodes), + rt_bucket_types:wait_until_bucket_type_visible(ANodes, UndefType), connect_clusters(LeaderA, LeaderB), {ClusterNodes, Types, PBA, PBB}. diff --git a/tests/replication/repl_consistent_object_filter.erl b/tests/replication/repl_consistent_object_filter.erl index 8766322d8..213b459c8 100644 --- a/tests/replication/repl_consistent_object_filter.erl +++ b/tests/replication/repl_consistent_object_filter.erl @@ -27,18 +27,18 @@ confirm() -> BucketType = <<"consistent_type">>, %% Create consistent bucket type on cluster A - rt:create_and_activate_bucket_type(LeaderA, + rt_bucket_types:create_and_activate_bucket_type(LeaderA, BucketType, [{consistent, true}, {n_val, 5}]), - rt:wait_until_bucket_type_status(BucketType, active, ANodes), - rt:wait_until_bucket_type_visible(ANodes, BucketType), + rt_bucket_types:wait_until_bucket_type_status(BucketType, active, ANodes), + rt_bucket_types:wait_until_bucket_type_visible(ANodes, BucketType), %% Create consistent bucket type on cluster B - rt:create_and_activate_bucket_type(LeaderB, + rt_bucket_types:create_and_activate_bucket_type(LeaderB, BucketType, [{consistent, true}, {n_val, 5}]), - rt:wait_until_bucket_type_status(BucketType, active, BNodes), - rt:wait_until_bucket_type_visible(BNodes, BucketType), + rt_bucket_types:wait_until_bucket_type_status(BucketType, active, BNodes), + rt_bucket_types:wait_until_bucket_type_visible(BNodes, BucketType), connect_clusters(LeaderA, LeaderB), diff --git a/tests/verify_conditional_postcommit.erl b/tests/verify_conditional_postcommit.erl index 24112468f..d5e4208fe 100644 --- a/tests/verify_conditional_postcommit.erl +++ 
b/tests/verify_conditional_postcommit.erl @@ -29,8 +29,8 @@ confirm() -> ok = rt:load_modules_on_nodes([?MODULE], Nodes), lager:info("Creating bucket types 'type1' and 'type2'"), - rt:create_and_activate_bucket_type(Node, <<"type1">>, [{magic, false}]), - rt:create_and_activate_bucket_type(Node, <<"type2">>, [{magic, true}]), + rt_bucket_types:create_and_activate_bucket_type(Node, <<"type1">>, [{magic, false}]), + rt_bucket_types:create_and_activate_bucket_type(Node, <<"type2">>, [{magic, true}]), lager:info("Installing conditional hook"), CondHook = {?MODULE, conditional_hook}, diff --git a/tests/verify_dt_context.erl b/tests/verify_dt_context.erl index 4a355962f..b4b6e11e1 100644 --- a/tests/verify_dt_context.erl +++ b/tests/verify_dt_context.erl @@ -199,7 +199,7 @@ create_pb_clients(Nodes) -> create_bucket_types([N1|_], Types) -> lager:info("Creating bucket types with datatypes: ~p", [Types]), - [rt:create_and_activate_bucket_type(N1, Name, [{datatype, Type}, {allow_mult, true}]) + [rt_bucket_types:create_and_activate_bucket_type(N1, Name, [{datatype, Type}, {allow_mult, true}]) || {Name, Type} <- Types ]. 
bucket_type_ready_fun(Name) -> diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index 16fca5c81..beb76d3de 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl @@ -84,7 +84,7 @@ run_test(TestMode, NTestItems, NTestNodes, HandoffEncoding) -> lager:info("Populating root node."), rt:systest_write(RootNode, NTestItems), %% write one object with a bucket type - rt:create_and_activate_bucket_type(RootNode, <<"type">>, []), + rt_bucket_types:create_and_activate_bucket_type(RootNode, <<"type">>, []), %% allow cluster metadata some time to propogate rt:systest_write(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), diff --git a/tests/verify_listkeys_eqcfsm.erl b/tests/verify_listkeys_eqcfsm.erl index f42206da4..d8ef9d672 100644 --- a/tests/verify_listkeys_eqcfsm.erl +++ b/tests/verify_listkeys_eqcfsm.erl @@ -183,9 +183,9 @@ setup_cluster(NumNodes) -> ?assertEqual(ok, rt:wait_until_transfers_complete(Nodes)), Node = hd(Nodes), [begin - rt:create_and_activate_bucket_type(Node, BucketType, [{n_val, NVal}]), - rt:wait_until_bucket_type_status(BucketType, active, Nodes), - rt:wait_until_bucket_type_visible(Nodes, BucketType) + rt_bucket_types:create_and_activate_bucket_type(Node, BucketType, [{n_val, NVal}]), + rt_bucket_types:wait_until_bucket_type_status(BucketType, active, Nodes), + rt_bucket_types:wait_until_bucket_type_visible(Nodes, BucketType) end || {BucketType, NVal} <- bucket_types()], Nodes. 
diff --git a/tests/yz_ensemble.erl b/tests/yz_ensemble.erl index 318fa6ae6..ba8a7dcd5 100644 --- a/tests/yz_ensemble.erl +++ b/tests/yz_ensemble.erl @@ -26,7 +26,7 @@ confirm() -> Node = hd(Nodes), lager:info("Creating/activating 'strong' bucket type"), - rt:create_and_activate_bucket_type(Node, <<"strong">>, + rt_bucket_types:create_and_activate_bucket_type(Node, <<"strong">>, [{consistent, true}, {n_val, NVal}]), Bucket = {<<"strong">>, <<"test">>}, From 8e51fb4627794357d6718795e3912d5d7975e222 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 22:30:51 -0400 Subject: [PATCH 13/17] Move AAE related functions from rt module to rt_aae. --- src/rt.erl | 79 -------------- src/rt_aae.erl | 103 +++++++++++++++++++ tests/gh_riak_kv_765.erl | 2 +- tests/replication/repl_aae_fullsync.erl | 24 ++--- tests/replication/repl_aae_fullsync_util.erl | 4 +- tests/replication/repl_fs_bench.erl | 8 +- tests/replication/repl_util.erl | 2 +- tests/verify_2i_aae.erl | 4 +- 8 files changed, 125 insertions(+), 101 deletions(-) create mode 100644 src/rt_aae.erl diff --git a/src/rt.erl b/src/rt.erl index b5ede7d8e..feff49a63 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -73,7 +73,6 @@ wait_until/3, wait_until/2, wait_until/1, - wait_until_aae_trees_built/1, wait_until_all_members/1, wait_until_all_members/2, wait_until_capability/3, @@ -552,84 +551,6 @@ wait_until_nodes_agree_about_ownership(Nodes) -> Results = [ wait_until_owners_according_to(Node, Nodes) || Node <- Nodes ], ?assert(lists:all(fun(X) -> ok =:= X end, Results)). -%% AAE support -wait_until_aae_trees_built(Nodes) -> - lager:info("Wait until AAE builds all partition trees across ~p", [Nodes]), - BuiltFun = fun() -> lists:foldl(aae_tree_built_fun(), true, Nodes) end, - ?assertEqual(ok, wait_until(BuiltFun)), - ok. 
- -aae_tree_built_fun() -> - fun(Node, _AllBuilt = true) -> - case get_aae_tree_info(Node) of - {ok, TreeInfos} -> - case all_trees_have_build_times(TreeInfos) of - true -> - Partitions = [I || {I, _} <- TreeInfos], - all_aae_trees_built(Node, Partitions); - false -> - some_trees_not_built - end; - Err -> - Err - end; - (_Node, Err) -> - Err - end. - -% It is unlikely but possible to get a tree built time from compute_tree_info -% but an attempt to use the tree returns not_built. This is because the build -% process has finished, but the lock on the tree won't be released until it -% dies and the manager detects it. Yes, this is super freaking paranoid. -all_aae_trees_built(Node, Partitions) -> - %% Notice that the process locking is spawned by the - %% pmap. That's important! as it should die eventually - %% so the lock is released and the test can lock the tree. - IndexBuilts = rt:pmap(index_built_fun(Node), Partitions), - BadOnes = [R || R <- IndexBuilts, R /= true], - case BadOnes of - [] -> - true; - _ -> - BadOnes - end. - -get_aae_tree_info(Node) -> - case rpc:call(Node, riak_kv_entropy_info, compute_tree_info, []) of - {badrpc, _} -> - {error, {badrpc, Node}}; - Info -> - lager:debug("Entropy table on node ~p : ~p", [Node, Info]), - {ok, Info} - end. - -all_trees_have_build_times(Info) -> - not lists:keymember(undefined, 2, Info). - -index_built_fun(Node) -> - fun(Idx) -> - case rpc:call(Node, riak_kv_vnode, - hashtree_pid, [Idx]) of - {ok, TreePid} -> - case rpc:call(Node, riak_kv_index_hashtree, - get_lock, [TreePid, for_riak_test]) of - {badrpc, _} -> - {error, {badrpc, Node}}; - TreeLocked when TreeLocked == ok; - TreeLocked == already_locked -> - true; - Err -> - % Either not_built or some unhandled result, - % in which case update this case please! - {error, {index_not_built, Node, Idx, Err}} - end; - {error, _}=Err -> - Err; - {badrpc, _} -> - {error, {badrpc, Node}} - end - end. 
- %%%=================================================================== %%% Basic Read/Write Functions %%%=================================================================== diff --git a/src/rt_aae.erl b/src/rt_aae.erl new file mode 100644 index 000000000..841a2f15d --- /dev/null +++ b/src/rt_aae.erl @@ -0,0 +1,103 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- + +-module(rt_aae). +-include_lib("eunit/include/eunit.hrl"). + +-export([wait_until_aae_trees_built/1]). + +-include("rt.hrl"). + +wait_until_aae_trees_built(Nodes) -> + lager:info("Wait until AAE builds all partition trees across ~p", [Nodes]), + BuiltFun = fun() -> lists:foldl(aae_tree_built_fun(), true, Nodes) end, + ?assertEqual(ok, rt:wait_until(BuiltFun)), + ok. + +aae_tree_built_fun() -> + fun(Node, _AllBuilt = true) -> + case get_aae_tree_info(Node) of + {ok, TreeInfos} -> + case all_trees_have_build_times(TreeInfos) of + true -> + Partitions = [I || {I, _} <- TreeInfos], + all_aae_trees_built(Node, Partitions); + false -> + some_trees_not_built + end; + Err -> + Err + end; + (_Node, Err) -> + Err + end. + +% It is unlikely but possible to get a tree built time from compute_tree_info +% but an attempt to use the tree returns not_built. 
This is because the build +% process has finished, but the lock on the tree won't be released until it +% dies and the manager detects it. Yes, this is super freaking paranoid. +all_aae_trees_built(Node, Partitions) -> + %% Notice that the process locking is spawned by the + %% pmap. That's important! as it should die eventually + %% so the lock is released and the test can lock the tree. + IndexBuilts = rt:pmap(index_built_fun(Node), Partitions), + BadOnes = [R || R <- IndexBuilts, R /= true], + case BadOnes of + [] -> + true; + _ -> + BadOnes + end. + +get_aae_tree_info(Node) -> + case rpc:call(Node, riak_kv_entropy_info, compute_tree_info, []) of + {badrpc, _} -> + {error, {badrpc, Node}}; + Info -> + lager:debug("Entropy table on node ~p : ~p", [Node, Info]), + {ok, Info} + end. + +all_trees_have_build_times(Info) -> + not lists:keymember(undefined, 2, Info). + +index_built_fun(Node) -> + fun(Idx) -> + case rpc:call(Node, riak_kv_vnode, + hashtree_pid, [Idx]) of + {ok, TreePid} -> + case rpc:call(Node, riak_kv_index_hashtree, + get_lock, [TreePid, for_riak_test]) of + {badrpc, _} -> + {error, {badrpc, Node}}; + TreeLocked when TreeLocked == ok; + TreeLocked == already_locked -> + true; + Err -> + % Either not_built or some unhandled result, + % in which case update this case please! + {error, {index_not_built, Node, Idx, Err}} + end; + {error, _}=Err -> + Err; + {badrpc, _} -> + {error, {badrpc, Node}} + end + end. diff --git a/tests/gh_riak_kv_765.erl b/tests/gh_riak_kv_765.erl index 0c5c880cb..834b6c964 100644 --- a/tests/gh_riak_kv_765.erl +++ b/tests/gh_riak_kv_765.erl @@ -88,7 +88,7 @@ check_throttle_and_expiration() -> time_build(Node) -> T0 = erlang:now(), - rt:wait_until_aae_trees_built([Node]), + rt_aae:wait_until_aae_trees_built([Node]), Duration = timer:now_diff(erlang:now(), T0), lager:info("Build took ~b us", [Duration]), Duration. 
diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index 4b817775f..94f1d52b0 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -91,8 +91,8 @@ simple_test() -> read_from_cluster(BFirst, 1, ?NUM_KEYS, ?NUM_KEYS), %% Wait for trees to compute. - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(BNodes), lager:info("Test fullsync from cluster A leader ~p to cluster B", [LeaderA]), @@ -186,9 +186,9 @@ dual_test() -> rt:wait_until_ring_converged(ANodes), %% Wait for trees to compute. - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), - rt:wait_until_aae_trees_built(CNodes), + rt_aae:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(CNodes), %% Flush AAE trees to disk. perform_sacrifice(AFirst), @@ -278,7 +278,7 @@ bidirectional_test() -> perform_sacrifice(AFirst), %% Wait for trees to compute. - rt:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(ANodes), %% Verify A replicated to B. validate_completed_fullsync(LeaderA, BFirst, "B", 1, ?NUM_KEYS), @@ -291,7 +291,7 @@ bidirectional_test() -> perform_sacrifice(BFirst), %% Wait for trees to compute. - rt:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(BNodes), %% Verify B replicated to A. validate_completed_fullsync(LeaderB, AFirst, "A", ?NUM_KEYS + 1, ?NUM_KEYS + ?NUM_KEYS), @@ -350,8 +350,8 @@ difference_test() -> [{timeout, 4000}]), %% Wait for trees to compute. 
- rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(BNodes), lager:info("Test fullsync from cluster A leader ~p to cluster B", [LeaderA]), @@ -436,8 +436,8 @@ deadlock_test() -> [ok = rt_intercept:add(Target, Intercept) || Target <- ANodes], %% Wait for trees to compute. - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(BNodes), lager:info("Test fullsync from cluster A leader ~p to cluster B", [LeaderA]), @@ -579,7 +579,7 @@ validate_intercepted_fullsync(InterceptTarget, rt:wait_for_service(InterceptTarget, riak_repl), %% Wait until AAE trees are compueted on the rebooted node. - rt:wait_until_aae_trees_built([InterceptTarget]). + rt_aae:wait_until_aae_trees_built([InterceptTarget]). %% @doc Given a node, find the port that the cluster manager is %% listening on. diff --git a/tests/replication/repl_aae_fullsync_util.erl b/tests/replication/repl_aae_fullsync_util.erl index 139196d0b..949d0dab9 100644 --- a/tests/replication/repl_aae_fullsync_util.erl +++ b/tests/replication/repl_aae_fullsync_util.erl @@ -77,6 +77,6 @@ prepare_cluster_data(TestBucket, NumKeysAOnly, _NumKeysBoth, [AFirst|_] = ANodes ?assertEqual(NumKeysAOnly, length(Res2)), %% wait for the AAE trees to be built so that we don't get a not_built error - rt:wait_until_aae_trees_built(ANodes), - rt:wait_until_aae_trees_built(BNodes), + rt_aae:wait_until_aae_trees_built(ANodes), + rt_aae:wait_until_aae_trees_built(BNodes), ok. diff --git a/tests/replication/repl_fs_bench.erl b/tests/replication/repl_fs_bench.erl index 9d783b752..bb185b1b1 100644 --- a/tests/replication/repl_fs_bench.erl +++ b/tests/replication/repl_fs_bench.erl @@ -122,27 +122,27 @@ fullsync_test(Strategy, Latency) -> ?assertEqual(ok, repl_util:wait_for_connection(LeaderA, "B")), %% Perform fullsync of an empty cluster. 
- rt:wait_until_aae_trees_built(ANodes ++ BNodes), + rt_aae:wait_until_aae_trees_built(ANodes ++ BNodes), {EmptyTime, _} = timer:tc(repl_util, start_and_wait_until_fullsync_complete, [LeaderA]), %% Write keys and perform fullsync. repl_util:write_to_cluster(AFirst, 0, ?FULL_NUM_KEYS, ?TEST_BUCKET), - rt:wait_until_aae_trees_built(ANodes ++ BNodes), + rt_aae:wait_until_aae_trees_built(ANodes ++ BNodes), {FullTime, _} = timer:tc(repl_util, start_and_wait_until_fullsync_complete, [LeaderA]), %% Rewrite first 10% keys and perform fullsync. repl_util:write_to_cluster(AFirst, 0, ?DIFF_NUM_KEYS, ?TEST_BUCKET), - rt:wait_until_aae_trees_built(ANodes ++ BNodes), + rt_aae:wait_until_aae_trees_built(ANodes ++ BNodes), {DiffTime, _} = timer:tc(repl_util, start_and_wait_until_fullsync_complete, [LeaderA]), %% Write no keys, and perform the fullsync. - rt:wait_until_aae_trees_built(ANodes ++ BNodes), + rt_aae:wait_until_aae_trees_built(ANodes ++ BNodes), {NoneTime, _} = timer:tc(repl_util, start_and_wait_until_fullsync_complete, [LeaderA]), diff --git a/tests/replication/repl_util.erl b/tests/replication/repl_util.erl index d5175d884..de2c89a5f 100644 --- a/tests/replication/repl_util.erl +++ b/tests/replication/repl_util.erl @@ -620,4 +620,4 @@ validate_intercepted_fullsync(InterceptTarget, rt:wait_for_service(InterceptTarget, riak_repl), %% Wait until AAE trees are compueted on the rebooted node. - rt:wait_until_aae_trees_built([InterceptTarget]). + rt_aae:wait_until_aae_trees_built([InterceptTarget]). 
diff --git a/tests/verify_2i_aae.erl b/tests/verify_2i_aae.erl index a2714e773..9284a4640 100644 --- a/tests/verify_2i_aae.erl +++ b/tests/verify_2i_aae.erl @@ -68,7 +68,7 @@ check_lost_objects(Node1, PBC, NumItems, NumDel) -> ok = rpc:call(Node1, application, set_env, [riak_kv, anti_entropy, {on, [debug]}]), ok = rpc:call(Node1, riak_kv_entropy_manager, enable, []), - rt:wait_until_aae_trees_built([Node1]), + rt_aae:wait_until_aae_trees_built([Node1]), lager:info("AAE trees built, now put the rest of the data"), [put_obj(PBC, Bucket, N, N+1, Index) @@ -125,7 +125,7 @@ do_tree_rebuild(Node) -> ?assertEqual(ok, rpc:call(Node, application, set_env, [riak_kv, anti_entropy_build_limit, {100, 1000}])), - rt:wait_until_aae_trees_built([Node]), + rt_aae:wait_until_aae_trees_built([Node]), ok. %% Write objects without a 2i index. Test that running 2i repair will generate From 8abc0736e2b551a685c8b71ac4ae328b731f6f55 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 22:53:40 -0400 Subject: [PATCH 14/17] More functions moved to rt_node. 
--- src/rt.erl | 37 +------------------ src/rt_cluster.erl | 6 +-- src/rt_node.erl | 37 ++++++++++++++++++- tests/ensemble_remove_node.erl | 2 +- tests/ensemble_remove_node2.erl | 2 +- tests/gh_riak_core_154.erl | 2 +- tests/gh_riak_core_176.erl | 8 ++-- tests/jmx_verify.erl | 4 +- tests/mapred_search_switch.erl | 2 +- tests/pipe_verify_handoff.erl | 2 +- tests/pipe_verify_handoff_blocking.erl | 2 +- tests/post_generate_key.erl | 2 +- tests/replication/repl_util.erl | 2 +- .../replication2_console_tests.erl | 2 +- .../replication_object_reformat.erl | 2 +- tests/riak_admin_console_tests.erl | 2 +- tests/riaknostic_rt.erl | 2 +- tests/verify_2i_limit.erl | 2 +- tests/verify_2i_mixed_cluster.erl | 2 +- tests/verify_2i_returnterms.erl | 2 +- tests/verify_2i_stream.erl | 2 +- tests/verify_2i_timeout.erl | 2 +- tests/verify_build_cluster.erl | 4 +- tests/verify_cs_bucket.erl | 2 +- tests/verify_down.erl | 6 +-- tests/verify_dynamic_ring.erl | 2 +- tests/verify_handoff.erl | 2 +- tests/verify_leave.erl | 4 +- tests/verify_listkeys.erl | 2 +- tests/verify_listkeys_eqcfsm.erl | 2 +- tests/verify_membackend.erl | 2 +- tests/verify_riak_lager.erl | 2 +- tests/verify_riak_stats.erl | 2 +- tests/verify_secondary_index_reformat.erl | 2 +- tests/verify_snmp.erl | 2 +- tests/verify_staged_clustering.erl | 6 +-- tests/verify_tick_change.erl | 2 +- 37 files changed, 84 insertions(+), 84 deletions(-) diff --git a/src/rt.erl b/src/rt.erl index feff49a63..341fad7ff 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -79,10 +79,7 @@ wait_until_capability/4, wait_until_connected/1, wait_until_legacy_ringready/1, - wait_until_owners_according_to/2, wait_until_no_pending_changes/1, - wait_until_nodes_agree_about_ownership/1, - wait_until_nodes_ready/1, wait_until_pingable/1, wait_until_ready/1, wait_until_registered/2, @@ -268,18 +265,6 @@ is_mixed_cluster(Node) -> Nodes = rpc:call(Node, erlang, nodes, []), is_mixed_cluster(Nodes). 
-%% @private -is_ready(Node) -> - case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of - {ok, Ring} -> - case lists:member(Node, riak_core_ring:ready_members(Ring)) of - true -> true; - false -> {not_ready, Node} - end; - Other -> - Other - end. - %% @private is_ring_ready(Node) -> case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of @@ -326,7 +311,7 @@ wait_until(Fun, Retry, Delay) when Retry > 0 -> %% information. wait_until_ready(Node) -> lager:info("Wait until ~p ready", [Node]), - ?assertEqual(ok, wait_until(Node, fun is_ready/1)), + ?assertEqual(ok, wait_until(Node, fun rt_node:is_ready/1)), ok. %% @doc Wait until status can be read from riak_kv_console @@ -393,13 +378,6 @@ wait_for_cluster_service(Nodes, Service) -> [?assertEqual(ok, wait_until(Node, F)) || Node <- Nodes], ok. -%% @doc Given a list of nodes, wait until all nodes are considered ready. -%% See {@link wait_until_ready/1} for definition of ready. -wait_until_nodes_ready(Nodes) -> - lager:info("Wait until nodes are ready : ~p", [Nodes]), - [?assertEqual(ok, wait_until(Node, fun is_ready/1)) || Node <- Nodes], - ok. - %% @doc Wait until all nodes in the list `Nodes' believe each other to be %% members of the cluster. wait_until_all_members(Nodes) -> @@ -538,19 +516,6 @@ cap_equal(Val, Cap) when is_list(Cap) -> cap_equal(Val, Cap) -> Val == Cap. -wait_until_owners_according_to(Node, Nodes) -> - SortedNodes = lists:usort(Nodes), - F = fun(N) -> - rt_ring:owners_according_to(N) =:= SortedNodes - end, - ?assertEqual(ok, wait_until(Node, F)), - ok. - -wait_until_nodes_agree_about_ownership(Nodes) -> - lager:info("Wait until nodes agree about ownership ~p", [Nodes]), - Results = [ wait_until_owners_according_to(Node, Nodes) || Node <- Nodes ], - ?assert(lists:all(fun(X) -> ok =:= X end, Results)). 
- %%%=================================================================== %%% Basic Read/Write Functions %%%=================================================================== diff --git a/src/rt_cluster.erl b/src/rt_cluster.erl index a3ccf4ccb..5a581bf31 100644 --- a/src/rt_cluster.erl +++ b/src/rt_cluster.erl @@ -167,10 +167,10 @@ join_cluster(Nodes) -> try_nodes_ready(Nodes, 3, 500) end, - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), %% Ensure each node owns a portion of the ring - rt:wait_until_nodes_agree_about_ownership(Nodes), + rt_node:wait_until_nodes_agree_about_ownership(Nodes), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)), ok. @@ -178,7 +178,7 @@ try_nodes_ready([Node1 | _Nodes], 0, _SleepMs) -> lager:info("Nodes not ready after initial plan/commit, retrying"), rt_node:plan_and_commit(Node1); try_nodes_ready(Nodes, N, SleepMs) -> - ReadyNodes = [Node || Node <- Nodes, rt:is_ready(Node) =:= true], + ReadyNodes = [Node || Node <- Nodes, rt_node:is_ready(Node) =:= true], case ReadyNodes of Nodes -> ok; diff --git a/src/rt_node.erl b/src/rt_node.erl index ee20b034e..55a53b3a0 100644 --- a/src/rt_node.erl +++ b/src/rt_node.erl @@ -28,6 +28,7 @@ stop_and_wait/1, upgrade/2, upgrade/3, + is_ready/1, slow_upgrade/3, join/2, staged_join/2, @@ -38,7 +39,10 @@ heal/1, partition/2, remove/2, - brutal_kill/1]). + brutal_kill/1, + wait_until_nodes_ready/1, + wait_until_owners_according_to/2, + wait_until_nodes_agree_about_ownership/1]). -define(HARNESS, (rt_config:get(rt_harness))). @@ -184,3 +188,34 @@ brutal_kill(Node) -> [5000, os, cmd, [io_lib:format("kill -9 ~s", [OSPidToKill])]]), rpc:cast(Node, os, cmd, [io_lib:format("kill -15 ~s", [OSPidToKill])]), ok. + +%% @doc Given a list of nodes, wait until all nodes are considered ready. +%% See {@link wait_until_ready/1} for definition of ready. 
+wait_until_nodes_ready(Nodes) -> + lager:info("Wait until nodes are ready : ~p", [Nodes]), + [?assertEqual(ok, rt:wait_until(Node, fun is_ready/1)) || Node <- Nodes], + ok. + +is_ready(Node) -> + case rpc:call(Node, riak_core_ring_manager, get_raw_ring, []) of + {ok, Ring} -> + case lists:member(Node, riak_core_ring:ready_members(Ring)) of + true -> true; + false -> {not_ready, Node} + end; + Other -> + Other + end. + +wait_until_owners_according_to(Node, Nodes) -> + SortedNodes = lists:usort(Nodes), + F = fun(N) -> + rt_ring:owners_according_to(N) =:= SortedNodes + end, + ?assertEqual(ok, rt:wait_until(Node, F)), + ok. + +wait_until_nodes_agree_about_ownership(Nodes) -> + lager:info("Wait until nodes agree about ownership ~p", [Nodes]), + Results = [ wait_until_owners_according_to(Node, Nodes) || Node <- Nodes ], + ?assert(lists:all(fun(X) -> ok =:= X end, Results)). diff --git a/tests/ensemble_remove_node.erl b/tests/ensemble_remove_node.erl index cefc0ba89..51f92d13a 100644 --- a/tests/ensemble_remove_node.erl +++ b/tests/ensemble_remove_node.erl @@ -59,7 +59,7 @@ confirm() -> rt_node:leave(Node3), ok = ensemble_util:wait_until_stable(Node, NVal), Remaining = Nodes -- [Node2, Node3], - rt:wait_until_nodes_agree_about_ownership(Remaining), + rt_node:wait_until_nodes_agree_about_ownership(Remaining), ok = rt:wait_until_unpingable(Node2), ok = rt:wait_until_unpingable(Node3), lager:info("Read value from the root ensemble"), diff --git a/tests/ensemble_remove_node2.erl b/tests/ensemble_remove_node2.erl index f5232ed05..ee82e716e 100644 --- a/tests/ensemble_remove_node2.erl +++ b/tests/ensemble_remove_node2.erl @@ -66,7 +66,7 @@ confirm() -> rt_node:leave(Node3), ok = ensemble_util:wait_until_stable(Node, NVal), Remaining = Nodes -- [Node2, Node3], - rt:wait_until_nodes_agree_about_ownership(Remaining), + rt_node:wait_until_nodes_agree_about_ownership(Remaining), %% TODO: How do we wait indefinitely for nodes to never exit here? A 30s %% sleep? 
diff --git a/tests/gh_riak_core_154.erl b/tests/gh_riak_core_154.erl index dfb53bfb4..37beaba37 100644 --- a/tests/gh_riak_core_154.erl +++ b/tests/gh_riak_core_154.erl @@ -29,7 +29,7 @@ confirm() -> %% Increase handoff concurrency on nodes NewConfig = [{riak_core, [{handoff_concurrency, 1024}]}], Nodes = rt_cluster:build_cluster(2, NewConfig), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), [Node1, Node2] = Nodes, lager:info("Write data while ~p is offline", [Node2]), diff --git a/tests/gh_riak_core_176.erl b/tests/gh_riak_core_176.erl index 959d14e68..df6664372 100644 --- a/tests/gh_riak_core_176.erl +++ b/tests/gh_riak_core_176.erl @@ -56,9 +56,9 @@ confirm() -> lager:info("Join ~p to the cluster and wait for handoff to finish", [Node2]), rt:join(Node2, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes12)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes12)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes12)), - rt:wait_until_nodes_agree_about_ownership(Nodes12), + rt_node:wait_until_nodes_agree_about_ownership(Nodes12), %% Check 0.0.0.0 address works lager:info("Change ~p handoff_ip to \"0.0.0.0\"", [Node3]), @@ -69,9 +69,9 @@ confirm() -> [Node3]), rt:wait_for_service(Node3, riak_kv), rt:join(Node3, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes123)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes123)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), - rt:wait_until_nodes_agree_about_ownership(Nodes123), + rt_node:wait_until_nodes_agree_about_ownership(Nodes123), lager:info("Test gh_riak_core_176 passed"), pass. 
diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index 06c05b849..a4dbd50ec 100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -34,7 +34,7 @@ confirm() -> Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], Nodes = rt_cluster:deploy_nodes(1, Config), [Node1] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1])), [{http, {IP, _Port}}|_] = rt:connection_info(Node1), @@ -162,7 +162,7 @@ test_application_stop() -> Config = [{riak_jmx, [{enabled, true}, {port, JMXPort}]}], Nodes = rt_cluster:deploy_nodes(1, Config), [Node] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node])), %% Let's make sure the java process is alive! lager:info("checking for riak_jmx.jar running."), diff --git a/tests/mapred_search_switch.erl b/tests/mapred_search_switch.erl index adf8f0833..f0c6a89d0 100644 --- a/tests/mapred_search_switch.erl +++ b/tests/mapred_search_switch.erl @@ -69,7 +69,7 @@ setup_test_env() -> %% deploy one to make the test run faster Nodes = rt_cluster:deploy_nodes(1, [{riak_search, [{enabled, true}]}, {yokozuna, [{enabled, true}]}]), - ok = rt:wait_until_nodes_ready(Nodes), + ok = rt_node:wait_until_nodes_ready(Nodes), ok = rt:wait_for_cluster_service(Nodes, riak_search), ok = rt:wait_for_cluster_service(Nodes, yokozuna), diff --git a/tests/pipe_verify_handoff.erl b/tests/pipe_verify_handoff.erl index f0f3f2251..c6bdaaad3 100644 --- a/tests/pipe_verify_handoff.erl +++ b/tests/pipe_verify_handoff.erl @@ -109,7 +109,7 @@ confirm() -> %% Give slave a chance to start and master to notice it. rt:join(Secondary, Primary), rt:wait_until_no_pending_changes(Nodes), - rt:wait_until_nodes_agree_about_ownership(Nodes), + rt_node:wait_until_nodes_agree_about_ownership(Nodes), lager:info("Unpause workers"), Runner ! 
go, diff --git a/tests/pipe_verify_handoff_blocking.erl b/tests/pipe_verify_handoff_blocking.erl index e8a454c54..10086211f 100644 --- a/tests/pipe_verify_handoff_blocking.erl +++ b/tests/pipe_verify_handoff_blocking.erl @@ -98,7 +98,7 @@ confirm() -> %% Give slave a chance to start and master to notice it. rt:join(Secondary, Primary), rt:wait_until_no_pending_changes(Nodes), - rt:wait_until_nodes_agree_about_ownership(Nodes), + rt_node:wait_until_nodes_agree_about_ownership(Nodes), lager:info("Unpause workers"), Runner ! go, diff --git a/tests/post_generate_key.erl b/tests/post_generate_key.erl index de77942b9..9ac63ab8c 100644 --- a/tests/post_generate_key.erl +++ b/tests/post_generate_key.erl @@ -26,7 +26,7 @@ confirm() -> Nodes = rt_cluster:build_cluster(1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), [Base|_] = rt:http_url(Nodes), diff --git a/tests/replication/repl_util.erl b/tests/replication/repl_util.erl index de2c89a5f..bbdd26998 100644 --- a/tests/replication/repl_util.erl +++ b/tests/replication/repl_util.erl @@ -55,7 +55,7 @@ make_cluster(Nodes) -> [First|Rest] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), [rt:wait_for_service(N, riak_kv) || N <- Nodes], [rt:join(Node, First) || Node <- Rest], ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)). 
diff --git a/tests/replication/replication2_console_tests.erl b/tests/replication/replication2_console_tests.erl index 5d5c55fae..30c64436b 100644 --- a/tests/replication/replication2_console_tests.erl +++ b/tests/replication/replication2_console_tests.erl @@ -49,7 +49,7 @@ confirm() -> %% Deploy a node to test against lager:info("Deploy node to test riak-repl command line"), [Node] = rt_cluster:deploy_nodes(1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node])), rt_intercept:add(Node, {riak_repl_console, [ diff --git a/tests/replication/replication_object_reformat.erl b/tests/replication/replication_object_reformat.erl index 1163d00a1..61f924edf 100644 --- a/tests/replication/replication_object_reformat.erl +++ b/tests/replication/replication_object_reformat.erl @@ -115,7 +115,7 @@ verify_replication(AVersion, BVersion, Start, End, Realtime) -> %% Wait until the sink cluster is in a steady state before %% starting fullsync - rt:wait_until_nodes_ready(BNodes), + rt_node:wait_until_nodes_ready(BNodes), rt:wait_until_no_pending_changes(BNodes), rt:wait_until_registered(BFirst, riak_repl2_fs_node_reserver), diff --git a/tests/riak_admin_console_tests.erl b/tests/riak_admin_console_tests.erl index 95ad3b421..d8928abea 100644 --- a/tests/riak_admin_console_tests.erl +++ b/tests/riak_admin_console_tests.erl @@ -155,7 +155,7 @@ confirm() -> %% Deploy a node to test against lager:info("Deploy node to test riak command line"), [Node] = rt_cluster:deploy_nodes(1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node])), rt_intercept:add(Node, {riak_core_console, [ diff --git a/tests/riaknostic_rt.erl b/tests/riaknostic_rt.erl index 9193b3bf2..cbcb72285 100644 --- a/tests/riaknostic_rt.erl +++ b/tests/riaknostic_rt.erl @@ -30,7 +30,7 @@ confirm() -> %% Build a small cluster [Node1, _Node2] = rt_cluster:build_cluster(2, []), - ?assertEqual(ok, 
rt:wait_until_nodes_ready([Node1])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1])), %% Install riaknostic for Riak versions below 1.3.0 riaknostic_bootstrap(Node1), diff --git a/tests/verify_2i_limit.erl b/tests/verify_2i_limit.erl index 6c2acdfbf..5aa551b35 100644 --- a/tests/verify_2i_limit.erl +++ b/tests/verify_2i_limit.erl @@ -32,7 +32,7 @@ confirm() -> inets:start(), Nodes = rt_cluster:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), + ?assertEqual(ok, (rt_node:wait_until_nodes_ready(Nodes))), RiakHttp = rt_http:httpc(hd(Nodes)), HttpUrl = rt:http_url(hd(Nodes)), diff --git a/tests/verify_2i_mixed_cluster.erl b/tests/verify_2i_mixed_cluster.erl index f0a7aa22a..2552e2b50 100644 --- a/tests/verify_2i_mixed_cluster.erl +++ b/tests/verify_2i_mixed_cluster.erl @@ -34,7 +34,7 @@ confirm() -> rt_cluster:build_cluster([{current, [{riak_kv, [{anti_entropy, {off, []}}]}]}, OldVsn, OldVsn]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), PBC1 = rt_pb:pbc(CurrentNode), PBC2 = rt_pb:pbc(OldNode1), diff --git a/tests/verify_2i_returnterms.erl b/tests/verify_2i_returnterms.erl index 7548256e3..0f38a14cc 100644 --- a/tests/verify_2i_returnterms.erl +++ b/tests/verify_2i_returnterms.erl @@ -31,7 +31,7 @@ confirm() -> inets:start(), Nodes = rt_cluster:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), + ?assertEqual(ok, (rt_node:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), PBPid = rt_pb:pbc(hd(Nodes)), diff --git a/tests/verify_2i_stream.erl b/tests/verify_2i_stream.erl index 8440fc30a..5bd3e6871 100644 --- a/tests/verify_2i_stream.erl +++ b/tests/verify_2i_stream.erl @@ -30,7 +30,7 @@ confirm() -> inets:start(), Nodes = rt_cluster:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), + ?assertEqual(ok, (rt_node:wait_until_nodes_ready(Nodes))), RiakHttp = rt:http_url(hd(Nodes)), PBPid = rt_pb:pbc(hd(Nodes)), 
diff --git a/tests/verify_2i_timeout.erl b/tests/verify_2i_timeout.erl index 6f05fa955..be0da7e61 100644 --- a/tests/verify_2i_timeout.erl +++ b/tests/verify_2i_timeout.erl @@ -30,7 +30,7 @@ confirm() -> inets:start(), Config = [{riak_kv, [{secondary_index_timeout, 1}]}], %% ludicrously short, should fail always Nodes = rt_cluster:build_cluster([{current, Config}, {current, Config}, {current, Config}]), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), + ?assertEqual(ok, (rt_node:wait_until_nodes_ready(Nodes))), PBPid = rt_pb:pbc(hd(Nodes)), Http = rt:http_url(hd(Nodes)), diff --git a/tests/verify_build_cluster.erl b/tests/verify_build_cluster.erl index 051d5c616..7f87e0290 100644 --- a/tests/verify_build_cluster.erl +++ b/tests/verify_build_cluster.erl @@ -94,11 +94,11 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> wait_and_validate(Nodes) -> wait_and_validate(Nodes, Nodes). wait_and_validate(RingNodes, UpNodes) -> lager:info("Wait until all nodes are ready and there are no pending changes"), - ?assertEqual(ok, rt:wait_until_nodes_ready(UpNodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(UpNodes)), ?assertEqual(ok, rt:wait_until_all_members(UpNodes)), ?assertEqual(ok, rt:wait_until_no_pending_changes(UpNodes)), lager:info("Ensure each node owns a portion of the ring"), - [rt:wait_until_owners_according_to(Node, RingNodes) || Node <- UpNodes], + [rt_node:wait_until_owners_according_to(Node, RingNodes) || Node <- UpNodes], [rt:wait_for_service(Node, riak_kv) || Node <- UpNodes], lager:info("Verify that you got much data... 
(this is how we do it)"), ?assertEqual([], rt:systest_read(hd(UpNodes), 0, 1000, <<"verify_build_cluster">>, 2)), diff --git a/tests/verify_cs_bucket.erl b/tests/verify_cs_bucket.erl index 4b517b043..470615a54 100644 --- a/tests/verify_cs_bucket.erl +++ b/tests/verify_cs_bucket.erl @@ -30,7 +30,7 @@ confirm() -> Nodes = rt_cluster:build_cluster(3), - ?assertEqual(ok, (rt:wait_until_nodes_ready(Nodes))), + ?assertEqual(ok, (rt_node:wait_until_nodes_ready(Nodes))), PBPid = rt_pb:pbc(hd(Nodes)), diff --git a/tests/verify_down.erl b/tests/verify_down.erl index 488b65cb0..2bc539849 100644 --- a/tests/verify_down.erl +++ b/tests/verify_down.erl @@ -29,7 +29,7 @@ confirm() -> %% Join node2 to node1 and wait for cluster convergence lager:info("Join ~p to ~p", [Node2, Node1]), rt:join(Node2, Node1), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1, Node2])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1, Node2])), ?assertEqual(ok, rt:wait_until_no_pending_changes([Node1, Node2])), %% Shutdown node2 @@ -59,10 +59,10 @@ confirm() -> %% Restart node2 and wait for ring convergence lager:info("Restart ~p and wait for ring convergence", [Node2]), rt_node:start(Node2), - ?assertEqual(ok, rt:wait_until_nodes_ready([Node2])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node2])), ?assertEqual(ok, rt:wait_until_ring_converged(Nodes)), %% Verify that all three nodes are ready lager:info("Ensure all nodes are ready"), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), pass. 
diff --git a/tests/verify_dynamic_ring.erl b/tests/verify_dynamic_ring.erl index 0e9b9d072..19cfdbda5 100644 --- a/tests/verify_dynamic_ring.erl +++ b/tests/verify_dynamic_ring.erl @@ -41,7 +41,7 @@ confirm() -> %% NewNodes = [ANode, YetAnother, ReplacingNode], rt:join(AnotherNode, ANode), rt:join(YetAnother, ANode), - rt:wait_until_nodes_agree_about_ownership(Nodes), + rt_node:wait_until_nodes_agree_about_ownership(Nodes), rt:wait_until_ring_converged(Nodes), rt:wait_until_no_pending_changes(Nodes), diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index beb76d3de..7e7e63039 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl @@ -108,7 +108,7 @@ test_handoff(RootNode, NewNode, NTestItems) -> lager:info("Joining new node with cluster."), rt:join(NewNode, RootNode), - ?assertEqual(ok, rt:wait_until_nodes_ready([RootNode, NewNode])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([RootNode, NewNode])), rt:wait_until_no_pending_changes([RootNode, NewNode]), %% See if we get the same data back from the joined node that we added to the root node. 
diff --git a/tests/verify_leave.erl b/tests/verify_leave.erl index 9bf079f38..52690241e 100644 --- a/tests/verify_leave.erl +++ b/tests/verify_leave.erl @@ -42,7 +42,7 @@ confirm() -> lager:info("Verify ~p no longer owns partitions and all nodes believe " "it is invalid", [Node2]), Remaining1 = Nodes -- [Node2], - rt:wait_until_nodes_agree_about_ownership(Remaining1), + rt_node:wait_until_nodes_agree_about_ownership(Remaining1), [?assertEqual(invalid, status_of_according_to(Node2, Node)) || Node <- Remaining1], %% Have node1 remove node3 @@ -54,6 +54,6 @@ confirm() -> lager:info("Verify ~p no longer owns partitions, and all nodes believe " "it is invalid", [Node3]), Remaining2 = Remaining1 -- [Node3], - rt:wait_until_nodes_agree_about_ownership(Remaining2), + rt_node:wait_until_nodes_agree_about_ownership(Remaining2), [?assertEqual(invalid, status_of_according_to(Node3, Node)) || Node <- Remaining2], pass. diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index 2a51f2063..d4edd58e5 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ -30,7 +30,7 @@ confirm() -> [Node1, Node2, Node3, Node4] = Nodes = rt_cluster:deploy_nodes(4), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), lager:info("Nodes deployed, but not joined."), diff --git a/tests/verify_listkeys_eqcfsm.erl b/tests/verify_listkeys_eqcfsm.erl index d8ef9d672..d7737f338 100644 --- a/tests/verify_listkeys_eqcfsm.erl +++ b/tests/verify_listkeys_eqcfsm.erl @@ -179,7 +179,7 @@ log_transition(S) -> %% ==================================================================== setup_cluster(NumNodes) -> Nodes = rt_cluster:build_cluster(NumNodes), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), ?assertEqual(ok, rt:wait_until_transfers_complete(Nodes)), Node = hd(Nodes), [begin diff --git a/tests/verify_membackend.erl b/tests/verify_membackend.erl index 
9b81eed99..ded71c846 100644 --- a/tests/verify_membackend.erl +++ b/tests/verify_membackend.erl @@ -86,7 +86,7 @@ check_leave_and_expiry(NodeA, NodeB) -> rt:join(NodeB, NodeA), - ?assertEqual(ok, rt:wait_until_nodes_ready([NodeA, NodeB])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([NodeA, NodeB])), rt:wait_until_no_pending_changes([NodeA, NodeB]), rt_node:leave(NodeB), diff --git a/tests/verify_riak_lager.erl b/tests/verify_riak_lager.erl index a11b6a03e..72f5b473e 100644 --- a/tests/verify_riak_lager.erl +++ b/tests/verify_riak_lager.erl @@ -30,7 +30,7 @@ confirm() -> lager:info("Staring a node"), Nodes = [Node] = rt_cluster:deploy_nodes(1), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), lager:info("Stopping that node"), rt_node:stop(Node), diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index e7a136c77..d05083bf3 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -26,7 +26,7 @@ confirm() -> Nodes = rt_cluster:deploy_nodes(1), [Node1] = Nodes, - ?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1])), Stats1 = get_stats(Node1), %% make sure a set of stats have valid values verify_nz(Stats1,[<<"cpu_nprocs">>, diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index ced7f813e..99a3d9fce 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -25,7 +25,7 @@ confirm() -> [Node] = rt_cluster:build_cluster([legacy]), - rt:wait_until_nodes_ready([Node]), + rt_node:wait_until_nodes_ready([Node]), check_fixed_index_statuses(Node, undefined), diff --git a/tests/verify_snmp.erl b/tests/verify_snmp.erl index 721969868..0385fccb6 100644 --- a/tests/verify_snmp.erl +++ b/tests/verify_snmp.erl @@ -28,7 +28,7 @@ confirm() -> %% Bring up a small cluster [Node1] = rt_cluster:deploy_nodes(1), - 
?assertEqual(ok, rt:wait_until_nodes_ready([Node1])), + ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1])), Keys = [{vnodeGets,<<"vnode_gets">>}, {vnodePuts,<<"vnode_puts">>}, diff --git a/tests/verify_staged_clustering.erl b/tests/verify_staged_clustering.erl index deda5b906..e117ae272 100644 --- a/tests/verify_staged_clustering.erl +++ b/tests/verify_staged_clustering.erl @@ -45,7 +45,7 @@ confirm() -> commit_staged(Node1), lager:info("Ensure that ~p now own all partitions", [Nodes123]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes123)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes123)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), rt_ring:assert_nodes_agree_about_ownership(Nodes123), @@ -62,7 +62,7 @@ confirm() -> Nodes134 = [Node1, Node3, Node4], lager:info("Ensure that ~p now own all partitions", [Nodes134]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes134)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes134)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes134)), rt_ring:assert_nodes_agree_about_ownership(Nodes134), @@ -83,7 +83,7 @@ confirm() -> Nodes124 = [Node1, Node2, Node4], lager:info("Ensure that ~p now own all partitions", [Nodes124]), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes124)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes124)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes124)), rt_ring:assert_nodes_agree_about_ownership(Nodes124), diff --git a/tests/verify_tick_change.erl b/tests/verify_tick_change.erl index cf8c12838..32f6d9040 100644 --- a/tests/verify_tick_change.erl +++ b/tests/verify_tick_change.erl @@ -28,7 +28,7 @@ confirm() -> rt_config:set_conf(all, [{"buckets.default.allow_mult", "false"}]), NewConfig = [], Nodes = rt_cluster:build_cluster(ClusterSize, NewConfig), - ?assertEqual(ok, rt:wait_until_nodes_ready(Nodes)), + ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), [Node1|_] = Nodes, Bucket = <<"systest">>, Start = 0, End = 100, From 
445edd039ccc5d5b2bd0bd7473451923687bde82 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Wed, 30 Jul 2014 23:03:40 -0400 Subject: [PATCH 15/17] Move rt:is_pingable to rt_node. --- src/rt.erl | 5 ----- src/rt_cluster.erl | 2 +- src/rt_node.erl | 7 ++++++- tests/basic_command_line.erl | 6 +++--- tests/ensemble_basic2.erl | 2 +- tests/gh_riak_core_155.erl | 2 +- tests/pr_pw.erl | 4 ++-- tests/replication/repl_aae_fullsync.erl | 2 +- tests/replication/repl_util.erl | 2 +- tests/riak_control_authentication.erl | 2 +- tests/verify_secondary_index_reformat.erl | 4 ++-- 11 files changed, 19 insertions(+), 19 deletions(-) diff --git a/src/rt.erl b/src/rt.erl index 341fad7ff..e5bcf77e8 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -43,7 +43,6 @@ get_replica/5, get_version/0, is_mixed_cluster/1, - is_pingable/1, load_modules_on_nodes/2, log_to_nodes/2, log_to_nodes/3, @@ -253,10 +252,6 @@ load_modules_on_nodes([Module | MoreModules], Nodes) %%% Status / Wait Functions %%%=================================================================== -%% @doc Is the `Node' up according to net_adm:ping -is_pingable(Node) -> - net_adm:ping(Node) =:= pong. - is_mixed_cluster(Nodes) when is_list(Nodes) -> %% If the nodes are bad, we don't care what version they are {Versions, _BadNodes} = rpc:multicall(Nodes, init, script_id, [], rt_config:get(rt_max_wait_time)), diff --git a/src/rt_cluster.erl b/src/rt_cluster.erl index 5a581bf31..26470e122 100644 --- a/src/rt_cluster.erl +++ b/src/rt_cluster.erl @@ -189,7 +189,7 @@ try_nodes_ready(Nodes, N, SleepMs) -> %% @doc Stop nodes and wipe out their data directories clean_cluster(Nodes) when is_list(Nodes) -> - [rt:stop_and_wait(Node) || Node <- Nodes], + [rt_node:stop_and_wait(Node) || Node <- Nodes], clean_data_dir(Nodes). 
clean_data_dir(Nodes) -> diff --git a/src/rt_node.erl b/src/rt_node.erl index 55a53b3a0..0c5e1d7f9 100644 --- a/src/rt_node.erl +++ b/src/rt_node.erl @@ -42,7 +42,8 @@ brutal_kill/1, wait_until_nodes_ready/1, wait_until_owners_according_to/2, - wait_until_nodes_agree_about_ownership/1]). + wait_until_nodes_agree_about_ownership/1, + is_pingable/1]). -define(HARNESS, (rt_config:get(rt_harness))). @@ -219,3 +220,7 @@ wait_until_nodes_agree_about_ownership(Nodes) -> lager:info("Wait until nodes agree about ownership ~p", [Nodes]), Results = [ wait_until_owners_according_to(Node, Nodes) || Node <- Nodes ], ?assert(lists:all(fun(X) -> ok =:= X end, Results)). + +%% @doc Is the `Node' up according to net_adm:ping +is_pingable(Node) -> + net_adm:ping(Node) =:= pong. diff --git a/tests/basic_command_line.erl b/tests/basic_command_line.erl index eda201190..d71b6e833 100644 --- a/tests/basic_command_line.erl +++ b/tests/basic_command_line.erl @@ -87,15 +87,15 @@ start_test(Node) -> {ok, StartPass} = rt_cmd_line:riak(Node, ["start"]), ?assertMatch(StartPass, ""), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), ok. stop_test(Node) -> - ?assert(rt:is_pingable(Node)), + ?assert(rt_node:is_pingable(Node)), {ok, "ok\n"} = rt_cmd_line:riak(Node, "stop"), - ?assertNot(rt:is_pingable(Node)), + ?assertNot(rt_node:is_pingable(Node)), ok. ping_up_test(Node) -> diff --git a/tests/ensemble_basic2.erl b/tests/ensemble_basic2.erl index 6af2f2796..77ec73c9b 100644 --- a/tests/ensemble_basic2.erl +++ b/tests/ensemble_basic2.erl @@ -42,7 +42,7 @@ confirm() -> timer:sleep(Delay), riak_kv_vnode_orig:init_orig(Args) end}}]}), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), rt_node:start(Node), lager:info("Polling peers while riak_kv starts. 
We should see none"), UpNoPeersFun = diff --git a/tests/gh_riak_core_155.erl b/tests/gh_riak_core_155.erl index 1e6ffd604..522e088f2 100644 --- a/tests/gh_riak_core_155.erl +++ b/tests/gh_riak_core_155.erl @@ -38,7 +38,7 @@ confirm() -> %% Restart node, add intercept that delay proxy startup, and issue gets. %% Gets will come in before proxies started, and should trigger crash. - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), rt_node:async_start(Node), rt:wait_until_pingable(Node), rt_intercept:load_intercepts([Node]), diff --git a/tests/pr_pw.erl b/tests/pr_pw.erl index 5cecf6e6d..5e1e5c802 100644 --- a/tests/pr_pw.erl +++ b/tests/pr_pw.erl @@ -68,7 +68,7 @@ confirm() -> ?assertMatch({ok, _}, C:get(<<"foo">>, <<"bar">>, [{pr, quorum}])), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), %% there's now a fallback in the preflist, so PR/PW won't be satisfied %% anymore @@ -110,7 +110,7 @@ confirm() -> ?assertEqual({error, timeout}, C:put(Obj, [{pw, all}])), %% reboot the node - rt:stop_and_wait(Node2), + rt_node:stop_and_wait(Node2), rt_node:start_and_wait(Node2), rt:wait_for_service(Node2, riak_kv), diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index 94f1d52b0..6bbcbbaea 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -571,7 +571,7 @@ validate_intercepted_fullsync(InterceptTarget, NumIndicies), %% Reboot node. - rt:stop_and_wait(InterceptTarget), + rt_node:stop_and_wait(InterceptTarget), rt_node:start_and_wait(InterceptTarget), %% Wait for riak_kv and riak_repl to initialize. diff --git a/tests/replication/repl_util.erl b/tests/replication/repl_util.erl index bbdd26998..d4e642cf1 100644 --- a/tests/replication/repl_util.erl +++ b/tests/replication/repl_util.erl @@ -612,7 +612,7 @@ validate_intercepted_fullsync(InterceptTarget, NumIndicies), %% Reboot node. 
- rt:stop_and_wait(InterceptTarget), + rt_node:stop_and_wait(InterceptTarget), rt_node:start_and_wait(InterceptTarget), %% Wait for riak_kv and riak_repl to initialize. diff --git a/tests/riak_control_authentication.erl b/tests/riak_control_authentication.erl index 3e8db3b00..88b495aca 100644 --- a/tests/riak_control_authentication.erl +++ b/tests/riak_control_authentication.erl @@ -217,7 +217,7 @@ build_singleton_cluster(Vsn, Config) -> %% the supervisor starts, we need to restart to ensure settings %% take effect. Node = lists:nth(1, Nodes), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), rt_node:start_and_wait(Node), rt:wait_for_service(Node, riak_kv), diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index 99a3d9fce..9ea33f1a2 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -73,7 +73,7 @@ confirm() -> rt:systest_write(Node, 10, 1), lager:info("restarting node"), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), rt_node:start(Node), rt:wait_for_service(Node, riak_kv), @@ -84,7 +84,7 @@ confirm() -> check_fixed_index_statuses(Node, false), - rt:stop_and_wait(Node), + rt_node:stop_and_wait(Node), rt_node:start(Node), rt:wait_for_service(Node, riak_kv), check_fixed_index_statuses(Node, false), From 9369104536ff4fe2ac947c0c87255a74bfe98312 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Thu, 31 Jul 2014 11:52:14 -0400 Subject: [PATCH 16/17] Move systest read/write functions from rt module to rt_systest. 
--- src/riak_test_runner.erl | 2 +- src/rt.erl | 162 +--------------- src/rt_systest.erl | 179 ++++++++++++++++++ tests/gh_riak_core_154.erl | 6 +- tests/gh_riak_core_176.erl | 6 +- tests/gh_riak_kv_765.erl | 2 +- tests/jmx_verify.erl | 2 +- tests/loaded_upgrade.erl | 4 +- tests/pipe_verify_handoff.erl | 2 +- tests/pipe_verify_handoff_blocking.erl | 2 +- tests/replication/repl_aae_fullsync.erl | 2 +- tests/replication/repl_aae_fullsync_util.erl | 2 +- tests/replication/repl_rt_cascading_rtq.erl | 2 +- tests/replication/repl_rt_overload.erl | 2 +- tests/replication/repl_util.erl | 12 +- tests/replication/replication.erl | 22 +-- tests/replication/replication2.erl | 18 +- tests/replication/replication2_pg.erl | 4 +- .../replication2_rt_sink_connection.erl | 2 +- tests/verify_basic_upgrade.erl | 8 +- tests/verify_build_cluster.erl | 10 +- tests/verify_capabilities.erl | 6 +- tests/verify_down.erl | 4 +- tests/verify_dynamic_ring.erl | 20 +- tests/verify_handoff.erl | 10 +- tests/verify_handoff_mixed.erl | 2 +- tests/verify_listkeys.erl | 2 +- tests/verify_membackend.erl | 18 +- tests/verify_mr_prereduce_node_down.erl | 2 +- tests/verify_riak_object_reformat.erl | 6 +- tests/verify_riak_stats.erl | 2 +- tests/verify_secondary_index_reformat.erl | 2 +- tests/verify_snmp.erl | 4 +- tests/verify_tick_change.erl | 4 +- 34 files changed, 277 insertions(+), 256 deletions(-) create mode 100644 src/rt_systest.erl diff --git a/src/riak_test_runner.erl b/src/riak_test_runner.erl index b245d8cee..54d39007f 100644 --- a/src/riak_test_runner.erl +++ b/src/riak_test_runner.erl @@ -159,7 +159,7 @@ compose_confirm_fun({ConfirmMod, ConfirmFun}, InitialResult = ConfirmMod:ConfirmFun(SetupData, MetaData), OtherResults = [begin ensure_all_nodes_running(Nodes), - _ = rt:upgrade(Node, UpgradeVersion), + _ = rt_node:upgrade(Node, UpgradeVersion), _ = rt_cluster:maybe_wait_for_transfers(Nodes, WaitForTransfers), ConfirmMod:ConfirmFun(SetupData, MetaData) end || Node <- Nodes], diff --git 
a/src/rt.erl b/src/rt.erl index e5bcf77e8..04befd855 100644 --- a/src/rt.erl +++ b/src/rt.erl @@ -52,18 +52,11 @@ rpc_get_env/2, setup_harness/2, setup_log_capture/1, - stream_cmd/1, stream_cmd/2, + stream_cmd/1, + stream_cmd/2, spawn_cmd/1, spawn_cmd/2, str/2, - systest_read/2, - systest_read/3, - systest_read/5, - systest_read/6, - systest_write/2, - systest_write/3, - systest_write/5, - systest_write/6, wait_for_cluster_service/2, wait_for_cmd/1, wait_for_service/2, @@ -511,157 +504,6 @@ cap_equal(Val, Cap) when is_list(Cap) -> cap_equal(Val, Cap) -> Val == Cap. -%%%=================================================================== -%%% Basic Read/Write Functions -%%%=================================================================== - -systest_write(Node, Size) -> - systest_write(Node, Size, 2). - -systest_write(Node, Size, W) -> - systest_write(Node, 1, Size, <<"systest">>, W). - -systest_write(Node, Start, End, Bucket, W) -> - systest_write(Node, Start, End, Bucket, W, <<>>). - -%% @doc Write (End-Start)+1 objects to Node. Objects keys will be -%% `Start', `Start+1' ... `End', each encoded as a 32-bit binary -%% (`<>'). Object values are the same as their keys. -%% -%% The return value of this function is a list of errors -%% encountered. If all writes were successful, return value is an -%% empty list. Each error has the form `{N :: integer(), Error :: term()}', -%% where N is the unencoded key of the object that failed to store. -systest_write(Node, Start, End, Bucket, W, CommonValBin) - when is_binary(CommonValBin) -> - rt:wait_for_service(Node, riak_kv), - {ok, C} = riak:client_connect(Node), - F = fun(N, Acc) -> - Obj = riak_object:new(Bucket, <>, - <>), - try C:put(Obj, W) of - ok -> - Acc; - Other -> - [{N, Other} | Acc] - catch - What:Why -> - [{N, {What, Why}} | Acc] - end - end, - lists:foldl(F, [], lists:seq(Start, End)). - -systest_read(Node, Size) -> - systest_read(Node, Size, 2). 
- -systest_read(Node, Size, R) -> - systest_read(Node, 1, Size, <<"systest">>, R). - -systest_read(Node, Start, End, Bucket, R) -> - systest_read(Node, Start, End, Bucket, R, <<>>). - -systest_read(Node, Start, End, Bucket, R, CommonValBin) - when is_binary(CommonValBin) -> - systest_read(Node, Start, End, Bucket, R, CommonValBin, false). - -%% Read and verify the values of objects written with -%% `systest_write'. The `SquashSiblings' parameter exists to -%% optionally allow handling of siblings whose value and metadata are -%% identical except for the dot. This goal is to facilitate testing -%% with DVV enabled because siblings can be created internally by Riak -%% in cases where testing with DVV disabled would not. Such cases -%% include writes that happen during handoff when a vnode forwards -%% writes, but also performs them locally or when a put coordinator -%% fails to send an acknowledgment within the timeout window and -%% another put request is issued. -systest_read(Node, Start, End, Bucket, R, CommonValBin, SquashSiblings) - when is_binary(CommonValBin) -> - rt:wait_for_service(Node, riak_kv), - {ok, C} = riak:client_connect(Node), - lists:foldl(systest_read_fold_fun(C, Bucket, R, CommonValBin, SquashSiblings), - [], - lists:seq(Start, End)). - -systest_read_fold_fun(C, Bucket, R, CommonValBin, SquashSiblings) -> - fun(N, Acc) -> - GetRes = C:get(Bucket, <>, R), - Val = object_value(GetRes, SquashSiblings), - update_acc(value_matches(Val, N, CommonValBin), Val, N, Acc) - end. - -object_value({error, _}=Error, _) -> - Error; -object_value({ok, Obj}, SquashSiblings) -> - object_value(riak_object:value_count(Obj), Obj, SquashSiblings). 
- -object_value(1, Obj, _SquashSiblings) -> - riak_object:get_value(Obj); -object_value(_ValueCount, Obj, false) -> - riak_object:get_value(Obj); -object_value(_ValueCount, Obj, true) -> - lager:debug("Siblings detected for ~p:~p", [riak_object:bucket(Obj), riak_object:key(Obj)]), - Contents = riak_object:get_contents(Obj), - case lists:foldl(fun sibling_compare/2, {true, undefined}, Contents) of - {true, {_, _, _, Value}} -> - lager:debug("Siblings determined to be a single value"), - Value; - {false, _} -> - {error, siblings} - end. - -sibling_compare({MetaData, Value}, {true, undefined}) -> - Dot = case dict:find(<<"dot">>, MetaData) of - {ok, DotVal} -> - DotVal; - error -> - {error, no_dot} - end, - VTag = dict:fetch(<<"X-Riak-VTag">>, MetaData), - LastMod = dict:fetch(<<"X-Riak-Last-Modified">>, MetaData), - {true, {element(2, Dot), VTag, LastMod, Value}}; -sibling_compare(_, {false, _}=InvalidMatch) -> - InvalidMatch; -sibling_compare({MetaData, Value}, {true, PreviousElements}) -> - Dot = case dict:find(<<"dot">>, MetaData) of - {ok, DotVal} -> - DotVal; - error -> - {error, no_dot} - end, - VTag = dict:fetch(<<"X-Riak-VTag">>, MetaData), - LastMod = dict:fetch(<<"X-Riak-Last-Modified">>, MetaData), - ComparisonElements = {element(2, Dot), VTag, LastMod, Value}, - {ComparisonElements =:= PreviousElements, ComparisonElements}. - -value_matches(<>, N, CommonValBin) -> - true; -value_matches(_WrongVal, _N, _CommonValBin) -> - false. - -update_acc(true, _, _, Acc) -> - Acc; -update_acc(false, {error, _}=Val, N, Acc) -> - [{N, Val} | Acc]; -update_acc(false, Val, N, Acc) -> - [{N, {wrong_val, Val}} | Acc]. - -verify_systest_value(N, Acc, CommonValBin, Obj) -> - Values = riak_object:get_values(Obj), - Res = [begin - case V of - <> -> - ok; - _WrongVal -> - wrong_val - end - end || V <- Values], - case lists:any(fun(X) -> X =:= ok end, Res) of - true -> - Acc; - false -> - [{N, {wrong_val, hd(Values)}} | Acc] - end. - % @doc Reads a single replica of a value. 
This issues a get command directly % to the vnode handling the Nth primary partition of the object's preflist. get_replica(Node, Bucket, Key, I, N) -> diff --git a/src/rt_systest.erl b/src/rt_systest.erl new file mode 100644 index 000000000..918496bb0 --- /dev/null +++ b/src/rt_systest.erl @@ -0,0 +1,179 @@ +%% ------------------------------------------------------------------- +%% +%% Copyright (c) 2013-2014 Basho Technologies, Inc. +%% +%% This file is provided to you under the Apache License, +%% Version 2.0 (the "License"); you may not use this file +%% except in compliance with the License. You may obtain +%% a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, +%% software distributed under the License is distributed on an +%% "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +%% KIND, either express or implied. See the License for the +%% specific language governing permissions and limitations +%% under the License. +%% +%% ------------------------------------------------------------------- +-module(rt_systest). +-include("rt.hrl"). +-include_lib("eunit/include/eunit.hrl"). + +-export([read/2, + read/3, + read/5, + read/6, + write/2, + write/3, + write/5, + write/6, + verify_systest_value/4]). + +write(Node, Size) -> + write(Node, Size, 2). + +write(Node, Size, W) -> + write(Node, 1, Size, <<"systest">>, W). + +write(Node, Start, End, Bucket, W) -> + write(Node, Start, End, Bucket, W, <<>>). + +%% @doc Write (End-Start)+1 objects to Node. Objects keys will be +%% `Start', `Start+1' ... `End', each encoded as a 32-bit binary +%% (`<>'). Object values are the same as their keys. +%% +%% The return value of this function is a list of errors +%% encountered. If all writes were successful, return value is an +%% empty list. Each error has the form `{N :: integer(), Error :: term()}', +%% where N is the unencoded key of the object that failed to store. 
+write(Node, Start, End, Bucket, W, CommonValBin) + when is_binary(CommonValBin) -> + rt:wait_for_service(Node, riak_kv), + {ok, C} = riak:client_connect(Node), + F = fun(N, Acc) -> + Obj = riak_object:new(Bucket, <>, + <>), + try C:put(Obj, W) of + ok -> + Acc; + Other -> + [{N, Other} | Acc] + catch + What:Why -> + [{N, {What, Why}} | Acc] + end + end, + lists:foldl(F, [], lists:seq(Start, End)). + +read(Node, Size) -> + read(Node, Size, 2). + +read(Node, Size, R) -> + read(Node, 1, Size, <<"systest">>, R). + +read(Node, Start, End, Bucket, R) -> + read(Node, Start, End, Bucket, R, <<>>). + +read(Node, Start, End, Bucket, R, CommonValBin) + when is_binary(CommonValBin) -> + read(Node, Start, End, Bucket, R, CommonValBin, false). + +%% Read and verify the values of objects written with +%% `systest_write'. The `SquashSiblings' parameter exists to +%% optionally allow handling of siblings whose value and metadata are +%% identical except for the dot. This goal is to facilitate testing +%% with DVV enabled because siblings can be created internally by Riak +%% in cases where testing with DVV disabled would not. Such cases +%% include writes that happen during handoff when a vnode forwards +%% writes, but also performs them locally or when a put coordinator +%% fails to send an acknowledgment within the timeout window and +%% another put request is issued. +read(Node, Start, End, Bucket, R, CommonValBin, SquashSiblings) + when is_binary(CommonValBin) -> + rt:wait_for_service(Node, riak_kv), + {ok, C} = riak:client_connect(Node), + lists:foldl(read_fold_fun(C, Bucket, R, CommonValBin, SquashSiblings), + [], + lists:seq(Start, End)). + +read_fold_fun(C, Bucket, R, CommonValBin, SquashSiblings) -> + fun(N, Acc) -> + GetRes = C:get(Bucket, <>, R), + Val = object_value(GetRes, SquashSiblings), + update_acc(value_matches(Val, N, CommonValBin), Val, N, Acc) + end. 
+ +object_value({error, _}=Error, _) -> + Error; +object_value({ok, Obj}, SquashSiblings) -> + object_value(riak_object:value_count(Obj), Obj, SquashSiblings). + +object_value(1, Obj, _SquashSiblings) -> + riak_object:get_value(Obj); +object_value(_ValueCount, Obj, false) -> + riak_object:get_value(Obj); +object_value(_ValueCount, Obj, true) -> + lager:debug("Siblings detected for ~p:~p", [riak_object:bucket(Obj), riak_object:key(Obj)]), + Contents = riak_object:get_contents(Obj), + case lists:foldl(fun sibling_compare/2, {true, undefined}, Contents) of + {true, {_, _, _, Value}} -> + lager:debug("Siblings determined to be a single value"), + Value; + {false, _} -> + {error, siblings} + end. + +sibling_compare({MetaData, Value}, {true, undefined}) -> + Dot = case dict:find(<<"dot">>, MetaData) of + {ok, DotVal} -> + DotVal; + error -> + {error, no_dot} + end, + VTag = dict:fetch(<<"X-Riak-VTag">>, MetaData), + LastMod = dict:fetch(<<"X-Riak-Last-Modified">>, MetaData), + {true, {element(2, Dot), VTag, LastMod, Value}}; +sibling_compare(_, {false, _}=InvalidMatch) -> + InvalidMatch; +sibling_compare({MetaData, Value}, {true, PreviousElements}) -> + Dot = case dict:find(<<"dot">>, MetaData) of + {ok, DotVal} -> + DotVal; + error -> + {error, no_dot} + end, + VTag = dict:fetch(<<"X-Riak-VTag">>, MetaData), + LastMod = dict:fetch(<<"X-Riak-Last-Modified">>, MetaData), + ComparisonElements = {element(2, Dot), VTag, LastMod, Value}, + {ComparisonElements =:= PreviousElements, ComparisonElements}. + +value_matches(<>, N, CommonValBin) -> + true; +value_matches(_WrongVal, _N, _CommonValBin) -> + false. +update_acc(true, _, _, Acc) -> + Acc; +update_acc(false, {error, _}=Val, N, Acc) -> + [{N, Val} | Acc]; +update_acc(false, Val, N, Acc) -> + [{N, {wrong_val, Val}} | Acc]. 
+ +verify_systest_value(N, Acc, CommonValBin, Obj) -> + Values = riak_object:get_values(Obj), + Res = [begin + case V of + <> -> + ok; + _WrongVal -> + wrong_val + end + end || V <- Values], + case lists:any(fun(X) -> X =:= ok end, Res) of + true -> + Acc; + false -> + [{N, {wrong_val, hd(Values)}} | Acc] + end. + diff --git a/tests/gh_riak_core_154.erl b/tests/gh_riak_core_154.erl index 37beaba37..c477b4e53 100644 --- a/tests/gh_riak_core_154.erl +++ b/tests/gh_riak_core_154.erl @@ -35,14 +35,14 @@ confirm() -> lager:info("Write data while ~p is offline", [Node2]), rt_node:stop(Node2), rt:wait_until_unpingable(Node2), - ?assertEqual([], rt:systest_write(Node1, 1000, 3)), + ?assertEqual([], rt_systest:write(Node1, 1000, 3)), lager:info("Verify that ~p is missing data", [Node2]), rt_node:start(Node2), rt_node:stop(Node1), rt:wait_until_unpingable(Node1), ?assertMatch([{_,{error,notfound}}|_], - rt:systest_read(Node2, 1000, 3)), + rt_systest:read(Node2, 1000, 3)), lager:info("Restart ~p and wait for handoff to occur", [Node1]), rt_node:start(Node1), @@ -51,7 +51,7 @@ confirm() -> lager:info("Verify that ~p has all data", [Node2]), rt_node:stop(Node1), - ?assertEqual([], rt:systest_read(Node2, 1000, 3)), + ?assertEqual([], rt_systest:read(Node2, 1000, 3)), lager:info("gh_riak_core_154: passed"), pass. 
diff --git a/tests/gh_riak_core_176.erl b/tests/gh_riak_core_176.erl index df6664372..9860a656e 100644 --- a/tests/gh_riak_core_176.erl +++ b/tests/gh_riak_core_176.erl @@ -51,11 +51,11 @@ confirm() -> rt:wait_for_service(Node2, riak_kv), lager:info("Write data to the cluster"), - rt:systest_write(Node1, 100), + rt_systest:write(Node1, 100), lager:info("Join ~p to the cluster and wait for handoff to finish", [Node2]), - rt:join(Node2, Node1), + rt_node:join(Node2, Node1), ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes12)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes12)), rt_node:wait_until_nodes_agree_about_ownership(Nodes12), @@ -68,7 +68,7 @@ confirm() -> lager:info("Join ~p to the cluster and wait for handoff to finish", [Node3]), rt:wait_for_service(Node3, riak_kv), - rt:join(Node3, Node1), + rt_node:join(Node3, Node1), ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes123)), ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes123)), rt_node:wait_until_nodes_agree_about_ownership(Nodes123), diff --git a/tests/gh_riak_kv_765.erl b/tests/gh_riak_kv_765.erl index 834b6c964..f43a45802 100644 --- a/tests/gh_riak_kv_765.erl +++ b/tests/gh_riak_kv_765.erl @@ -71,7 +71,7 @@ check_throttle_and_expiration() -> timer:sleep(2000), lager:info("Write 1000 keys"), - rt:systest_write(Node, 1000), + rt_systest:write(Node, 1000), enable_aae(Node), time_build(Node), Duration1 = rebuild(Node, 30000, 1000), diff --git a/tests/jmx_verify.erl b/tests/jmx_verify.erl index a4dbd50ec..a657d9fae 100644 --- a/tests/jmx_verify.erl +++ b/tests/jmx_verify.erl @@ -82,7 +82,7 @@ confirm() -> Pid = rt_pb:pbc(Node1), JMX3 = jmx_dump(JMXDumpCmd), - rt:systest_write(Node1, 1), + rt_systest:write(Node1, 1), %% make sure the stats that were supposed to increment did verify_inc(JMX2, JMX3, [{<<"pbc_connects_total">>, 1}, {<<"pbc_connects">>, 1}, diff --git a/tests/loaded_upgrade.erl b/tests/loaded_upgrade.erl index 7e89350fb..5feab5801 100644 --- a/tests/loaded_upgrade.erl 
+++ b/tests/loaded_upgrade.erl @@ -116,8 +116,8 @@ seed_cluster(Nodes=[Node1|_]) -> %% For List Keys lager:info("Writing 100 keys to ~p", [Node1]), - rt:systest_write(Node1, 100, 3), - ?assertEqual([], rt:systest_read(Node1, 100, 1)), + rt_systest:write(Node1, 100, 3), + ?assertEqual([], rt_systest:read(Node1, 100, 1)), seed(Node1, 0, 100, fun(Key) -> Bin = iolist_to_binary(io_lib:format("~p", [Key])), diff --git a/tests/pipe_verify_handoff.erl b/tests/pipe_verify_handoff.erl index c6bdaaad3..61cd440d1 100644 --- a/tests/pipe_verify_handoff.erl +++ b/tests/pipe_verify_handoff.erl @@ -107,7 +107,7 @@ confirm() -> lager:info("Join Secondary to Primary"), %% Give slave a chance to start and master to notice it. - rt:join(Secondary, Primary), + rt_node:join(Secondary, Primary), rt:wait_until_no_pending_changes(Nodes), rt_node:wait_until_nodes_agree_about_ownership(Nodes), diff --git a/tests/pipe_verify_handoff_blocking.erl b/tests/pipe_verify_handoff_blocking.erl index 10086211f..6a5313b88 100644 --- a/tests/pipe_verify_handoff_blocking.erl +++ b/tests/pipe_verify_handoff_blocking.erl @@ -96,7 +96,7 @@ confirm() -> lager:info("Join Secondary to Primary"), %% Give slave a chance to start and master to notice it. - rt:join(Secondary, Primary), + rt_node:join(Secondary, Primary), rt:wait_until_no_pending_changes(Nodes), rt_node:wait_until_nodes_agree_about_ownership(Nodes), diff --git a/tests/replication/repl_aae_fullsync.erl b/tests/replication/repl_aae_fullsync.erl index 6bbcbbaea..81a7dcaed 100644 --- a/tests/replication/repl_aae_fullsync.erl +++ b/tests/replication/repl_aae_fullsync.erl @@ -612,5 +612,5 @@ write_to_cluster(Node, Start, End) -> %% of errors. read_from_cluster(Node, Start, End, Errors) -> lager:info("Reading ~p keys from node ~p.", [End - Start, Node]), - Res2 = rt:systest_read(Node, Start, End, ?TEST_BUCKET, 1), + Res2 = rt_systest:read(Node, Start, End, ?TEST_BUCKET, 1), ?assertEqual(Errors, length(Res2)). 
diff --git a/tests/replication/repl_aae_fullsync_util.erl b/tests/replication/repl_aae_fullsync_util.erl index 949d0dab9..becd2ebeb 100644 --- a/tests/replication/repl_aae_fullsync_util.erl +++ b/tests/replication/repl_aae_fullsync_util.erl @@ -73,7 +73,7 @@ prepare_cluster_data(TestBucket, NumKeysAOnly, _NumKeysBoth, [AFirst|_] = ANodes %% check that the keys we wrote initially aren't replicated yet, because %% we've disabled fullsync_on_connect lager:info("Check keys written before repl was connected are not present"), - Res2 = rt:systest_read(BFirst, 1, NumKeysAOnly, TestBucket, 1, <<>>, true), + Res2 = rt_systest:read(BFirst, 1, NumKeysAOnly, TestBucket, 1, <<>>, true), ?assertEqual(NumKeysAOnly, length(Res2)), %% wait for the AAE trees to be built so that we don't get a not_built error diff --git a/tests/replication/repl_rt_cascading_rtq.erl b/tests/replication/repl_rt_cascading_rtq.erl index 64939bae0..79b7fa353 100644 --- a/tests/replication/repl_rt_cascading_rtq.erl +++ b/tests/replication/repl_rt_cascading_rtq.erl @@ -181,5 +181,5 @@ write_to_cluster(Node, Start, End) -> %% of errors. read_from_cluster(Node, Start, End, Errors) -> lager:info("Reading ~p keys from node ~p.", [End - Start, Node]), - Res2 = rt:systest_read(Node, Start, End, ?TEST_BUCKET, 1), + Res2 = rt_systest:read(Node, Start, End, ?TEST_BUCKET, 1), ?assertEqual(Errors, length(Res2)). 
diff --git a/tests/replication/repl_rt_overload.erl b/tests/replication/repl_rt_overload.erl index e425e02b3..6b13cbd8f 100644 --- a/tests/replication/repl_rt_overload.erl +++ b/tests/replication/repl_rt_overload.erl @@ -89,7 +89,7 @@ verify_overload_writes(LeaderA, LeaderB) -> ?assertEqual([], repl_util:do_write(LeaderA, First, Last, TestBucket, 2)), lager:info("Reading ~p keys from ~p", [Last-First+1, LeaderB]), - NumReads = rt:systest_read(LeaderB, First, Last, TestBucket, 2), + NumReads = rt_systest:read(LeaderB, First, Last, TestBucket, 2), lager:info("systest_read saw ~p errors", [length(NumReads)]), diff --git a/tests/replication/repl_util.erl b/tests/replication/repl_util.erl index d4e642cf1..308754044 100644 --- a/tests/replication/repl_util.erl +++ b/tests/replication/repl_util.erl @@ -57,7 +57,7 @@ make_cluster(Nodes) -> [First|Rest] = Nodes, ?assertEqual(ok, rt_node:wait_until_nodes_ready(Nodes)), [rt:wait_for_service(N, riak_kv) || N <- Nodes], - [rt:join(Node, First) || Node <- Rest], + [rt_node:join(Node, First) || Node <- Rest], ?assertEqual(ok, rt:wait_until_no_pending_changes(Nodes)). name_cluster(Node, Name) -> @@ -197,10 +197,10 @@ wait_until_fullsync_stopped(SourceLeader) -> wait_for_reads(Node, Start, End, Bucket, R) -> rt:wait_until(Node, fun(_) -> - Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true), + Reads = rt_systest:read(Node, Start, End, Bucket, R, <<>>, true), Reads == [] end), - Reads = rt:systest_read(Node, Start, End, Bucket, R, <<>>, true), + Reads = rt_systest:read(Node, Start, End, Bucket, R, <<>>, true), lager:info("Reads: ~p", [Reads]), length(Reads). @@ -442,14 +442,14 @@ stop_realtime(Node, Cluster) -> ?assertEqual(ok, Res). 
do_write(Node, Start, End, Bucket, W) -> - case rt:systest_write(Node, Start, End, Bucket, W) of + case rt_systest:write(Node, Start, End, Bucket, W) of [] -> []; Errors -> lager:warning("~p errors while writing: ~p", [length(Errors), Errors]), timer:sleep(1000), - lists:flatten([rt:systest_write(Node, S, S, Bucket, W) || + lists:flatten([rt_systest:write(Node, S, S, Bucket, W) || {S, _Error} <- Errors]) end. @@ -559,7 +559,7 @@ read_from_cluster(Node, Start, End, Bucket, Errors) -> %% of errors. read_from_cluster(Node, Start, End, Bucket, Errors, Quorum) -> lager:info("Reading ~p keys from node ~p.", [End - Start, Node]), - Res2 = rt:systest_read(Node, Start, End, Bucket, Quorum, <<>>, true), + Res2 = rt_systest:read(Node, Start, End, Bucket, Quorum, <<>>, true), ?assertEqual(Errors, length(Res2)). %% @doc Assert we can perform one fullsync cycle, and that the number of diff --git a/tests/replication/replication.erl b/tests/replication/replication.erl index e09502230..ae503e7e1 100644 --- a/tests/replication/replication.erl +++ b/tests/replication/replication.erl @@ -111,7 +111,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> %% check that the keys we wrote initially aren't replicated yet, because %% we've disabled fullsync_on_connect lager:info("Check keys written before repl was connected are not present"), - Res2 = rt:systest_read(BFirst, 1, 100, TestBucket, 2), + Res2 = rt_systest:read(BFirst, 1, 100, TestBucket, 2), ?assertEqual(100, length(Res2)), start_and_wait_until_fullsync_complete(LeaderA), @@ -311,19 +311,19 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> FullsyncOnly, 2)), lager:info("Check the fullsync only bucket didn't replicate the writes"), - Res6 = rt:systest_read(BSecond, 1, 100, FullsyncOnly, 2), + Res6 = rt_systest:read(BSecond, 1, 100, FullsyncOnly, 2), ?assertEqual(100, length(Res6)), lager:info("Check the realtime only bucket that was written to offline " "isn't replicated"), - Res7 = 
rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res7 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res7)); _ -> timer:sleep(1000) end, lager:info("Check the {repl, false} bucket didn't replicate"), - Res8 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), + Res8 = rt_systest:read(BSecond, 1, 100, NoRepl, 2), ?assertEqual(100, length(Res8)), %% do a fullsync, make sure that fullsync_only is replicated, but @@ -337,7 +337,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> FullsyncOnly, 2)), lager:info("Check realtime only bucket didn't replicate"), - Res10 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res10 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res10)), @@ -352,14 +352,14 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> RealtimeOnly, 2)), lager:info("Check the older keys in the realtime bucket did not replicate"), - Res12 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res12 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res12)); _ -> ok end, lager:info("Check {repl, false} bucket didn't replicate"), - Res13 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), + Res13 = rt_systest:read(BSecond, 1, 100, NoRepl, 2), ?assertEqual(100, length(Res13)); _ -> ok @@ -690,20 +690,20 @@ wait_until_no_connection(Node) -> wait_for_reads(Node, Start, End, Bucket, R) -> rt:wait_until(Node, fun(_) -> - rt:systest_read(Node, Start, End, Bucket, R) == [] + rt_systest:read(Node, Start, End, Bucket, R) == [] end), - Reads = rt:systest_read(Node, Start, End, Bucket, R), + Reads = rt_systest:read(Node, Start, End, Bucket, R), lager:info("Reads: ~p", [Reads]), length(Reads). 
do_write(Node, Start, End, Bucket, W) -> - case rt:systest_write(Node, Start, End, Bucket, W) of + case rt_systest:write(Node, Start, End, Bucket, W) of [] -> []; Errors -> lager:warning("~p errors while writing: ~p", [length(Errors), Errors]), timer:sleep(1000), - lists:flatten([rt:systest_write(Node, S, S, Bucket, W) || + lists:flatten([rt_systest:write(Node, S, S, Bucket, W) || {S, _Error} <- Errors]) end. diff --git a/tests/replication/replication2.erl b/tests/replication/replication2.erl index 4e21ba944..3fb0d9a10 100644 --- a/tests/replication/replication2.erl +++ b/tests/replication/replication2.erl @@ -131,7 +131,7 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> %% check that the keys we wrote initially aren't replicated yet, because %% we've disabled fullsync_on_connect lager:info("Check keys written before repl was connected are not present"), - Res2 = rt:systest_read(BFirst, 1, 100, TestBucket, 2), + Res2 = rt_systest:read(BFirst, 1, 100, TestBucket, 2), ?assertEqual(100, length(Res2)), log_to_nodes(AllNodes, "Test fullsync with leader ~p", [LeaderA]), @@ -351,16 +351,16 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> FullsyncOnly, 2)), lager:info("Check the fullsync only bucket didn't replicate the writes"), - Res6 = rt:systest_read(BSecond, 1, 100, FullsyncOnly, 2), + Res6 = rt_systest:read(BSecond, 1, 100, FullsyncOnly, 2), ?assertEqual(100, length(Res6)), lager:info("Check the realtime only bucket that was written to offline " "isn't replicated"), - Res7 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res7 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res7)), lager:info("Check the {repl, false} bucket didn't replicate"), - Res8 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), + Res8 = rt_systest:read(BSecond, 1, 100, NoRepl, 2), ?assertEqual(100, length(Res8)), %% do a fullsync, make sure that fullsync_only is replicated, but @@ -372,7 +372,7 @@ replication([AFirst|_] 
= ANodes, [BFirst|_] = BNodes, Connected) -> FullsyncOnly, 2)), lager:info("Check realtime only bucket didn't replicate"), - Res10 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res10 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res10)), lager:info("Write 100 more keys into realtime only bucket on ~p", @@ -387,11 +387,11 @@ replication([AFirst|_] = ANodes, [BFirst|_] = BNodes, Connected) -> RealtimeOnly, 2)), lager:info("Check the older keys in the realtime bucket did not replicate"), - Res12 = rt:systest_read(BSecond, 1, 100, RealtimeOnly, 2), + Res12 = rt_systest:read(BSecond, 1, 100, RealtimeOnly, 2), ?assertEqual(100, length(Res12)), lager:info("Check {repl, false} bucket didn't replicate"), - Res13 = rt:systest_read(BSecond, 1, 100, NoRepl, 2), + Res13 = rt_systest:read(BSecond, 1, 100, NoRepl, 2), ?assertEqual(100, length(Res13)), log_to_nodes(AllNodes, "Testing offline realtime queueing"), @@ -491,7 +491,7 @@ pb_write_during_shutdown(Target, BSecond, TestBucket) -> lager:info("got ~p write failures", [length(WriteErrors)]), timer:sleep(3000), lager:info("checking number of read failures on secondary cluster"), - ReadErrors = rt:systest_read(BSecond, 1000, 11000, TestBucket, 2), + ReadErrors = rt_systest:read(BSecond, 1000, 11000, TestBucket, 2), lager:info("got ~p read failures", [length(ReadErrors)]), %% ensure node is down before we try to start it up again. 
@@ -501,7 +501,7 @@ pb_write_during_shutdown(Target, BSecond, TestBucket) -> rt_node:start(Target), rt:wait_until_pingable(Target), rt:wait_for_service(Target, riak_repl), - ReadErrors2 = rt:systest_read(Target, 1000, 11000, TestBucket, 2), + ReadErrors2 = rt_systest:read(Target, 1000, 11000, TestBucket, 2), lager:info("got ~p read failures on ~p", [length(ReadErrors2), Target]), case length(WriteErrors) >= length(ReadErrors) of true -> diff --git a/tests/replication/replication2_pg.erl b/tests/replication/replication2_pg.erl index bb328b4cd..06cedbeb7 100644 --- a/tests/replication/replication2_pg.erl +++ b/tests/replication/replication2_pg.erl @@ -971,9 +971,9 @@ verify_topology_change(SourceNodes, SinkNodes) -> lager:info("Rejoining former leader."), case SinkLeader of SinkNode1 -> - rt:join(SinkNode1, SinkNode2); + rt_node:join(SinkNode1, SinkNode2); SinkNode2 -> - rt:join(SinkNode2, SinkNode1) + rt_node:join(SinkNode2, SinkNode1) end, rt:wait_until_ring_converged(SinkNodes), diff --git a/tests/replication/replication2_rt_sink_connection.erl b/tests/replication/replication2_rt_sink_connection.erl index c9348fd7c..03b024e0a 100644 --- a/tests/replication/replication2_rt_sink_connection.erl +++ b/tests/replication/replication2_rt_sink_connection.erl @@ -88,7 +88,7 @@ confirm() -> enable_rt(AFirst, ANodes), lager:info("Adding 4th node to the A cluster"), - rt:join(CNode, AFirst), + rt_node:join(CNode, AFirst), [verify_connectivity(Node) || Node <- ANodes], diff --git a/tests/verify_basic_upgrade.erl b/tests/verify_basic_upgrade.erl index b1c17556a..138243048 100644 --- a/tests/verify_basic_upgrade.erl +++ b/tests/verify_basic_upgrade.erl @@ -29,8 +29,8 @@ confirm() -> Nodes = [Node1|_] = rt_cluster:build_cluster([OldVsn, OldVsn, OldVsn, OldVsn]), lager:info("Writing 100 keys to ~p", [Node1]), - rt:systest_write(Node1, 100, 3), - ?assertEqual([], rt:systest_read(Node1, 100, 1)), + rt_systest:write(Node1, 100, 3), + ?assertEqual([], rt_systest:read(Node1, 100, 1)), 
[upgrade(Node, current) || Node <- Nodes], @@ -43,6 +43,6 @@ upgrade(Node, NewVsn) -> rt:upgrade(Node, NewVsn), rt:wait_for_service(Node, riak_kv), lager:info("Ensuring keys still exist"), - rt:systest_read(Node, 100, 1), - ?assertEqual([], rt:systest_read(Node, 100, 1)), + rt_systest:read(Node, 100, 1), + ?assertEqual([], rt_systest:read(Node, 100, 1)), ok. diff --git a/tests/verify_build_cluster.erl b/tests/verify_build_cluster.erl index 7f87e0290..c502e6d90 100644 --- a/tests/verify_build_cluster.erl +++ b/tests/verify_build_cluster.erl @@ -38,18 +38,18 @@ confirm(#rt_properties{nodes=Nodes}, _MD) -> [Node1, Node2, Node3, Node4] = Nodes, lager:info("Loading some data up in this cluster."), - ?assertEqual([], rt:systest_write(Node1, 0, 1000, <<"verify_build_cluster">>, 2)), + ?assertEqual([], rt_systest:write(Node1, 0, 1000, <<"verify_build_cluster">>, 2)), lager:info("joining Node 2 to the cluster... It takes two to make a thing go right"), - rt:join(Node2, Node1), + rt_node:join(Node2, Node1), wait_and_validate([Node1, Node2]), lager:info("joining Node 3 to the cluster"), - rt:join(Node3, Node1), + rt_node:join(Node3, Node1), wait_and_validate([Node1, Node2, Node3]), lager:info("joining Node 4 to the cluster"), - rt:join(Node4, Node1), + rt_node:join(Node4, Node1), wait_and_validate(Nodes), lager:info("taking Node 1 down"), @@ -101,5 +101,5 @@ wait_and_validate(RingNodes, UpNodes) -> [rt_node:wait_until_owners_according_to(Node, RingNodes) || Node <- UpNodes], [rt:wait_for_service(Node, riak_kv) || Node <- UpNodes], lager:info("Verify that you got much data... (this is how we do it)"), - ?assertEqual([], rt:systest_read(hd(UpNodes), 0, 1000, <<"verify_build_cluster">>, 2)), + ?assertEqual([], rt_systest:read(hd(UpNodes), 0, 1000, <<"verify_build_cluster">>, 2)), done. 
diff --git a/tests/verify_capabilities.erl b/tests/verify_capabilities.erl index 2d6ee385c..28d345fb9 100644 --- a/tests/verify_capabilities.erl +++ b/tests/verify_capabilities.erl @@ -69,7 +69,7 @@ confirm() -> ?assertEqual(ok, rt:wait_until_capability(CNode, {riak_core, staged_joins}, true)), lager:info("Building current + ~s cluster", [Legacy]), - rt:join(LNode, CNode), + rt_node:join(LNode, CNode), ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode])), ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)), @@ -102,7 +102,7 @@ confirm() -> restart_capability_server(CNode), lager:info("Adding previous node to cluster"), - rt:join(PNode, LNode), + rt_node:join(PNode, LNode), ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])), ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)), @@ -139,7 +139,7 @@ confirm() -> _ -> lager:info("Legacy Riak not available, skipping legacy tests"), lager:info("Adding previous node to cluster"), - rt:join(PNode, LNode), + rt_node:join(PNode, LNode), ?assertEqual(ok, rt:wait_until_all_members([CNode], [CNode, LNode, PNode])), ?assertEqual(ok, rt:wait_until_legacy_ringready(CNode)) end, diff --git a/tests/verify_down.erl b/tests/verify_down.erl index 2bc539849..33ca503ea 100644 --- a/tests/verify_down.erl +++ b/tests/verify_down.erl @@ -28,7 +28,7 @@ confirm() -> %% Join node2 to node1 and wait for cluster convergence lager:info("Join ~p to ~p", [Node2, Node1]), - rt:join(Node2, Node1), + rt_node:join(Node2, Node1), ?assertEqual(ok, rt_node:wait_until_nodes_ready([Node1, Node2])), ?assertEqual(ok, rt:wait_until_no_pending_changes([Node1, Node2])), @@ -40,7 +40,7 @@ confirm() -> %% Join node3 to node1 lager:info("Join ~p to ~p", [Node3, Node1]), - rt:join(Node3, Node1), + rt_node:join(Node3, Node1), ?assertEqual(ok, rt:wait_until_all_members(Remaining, [Node3])), %% Ensure node3 remains in the joining state diff --git a/tests/verify_dynamic_ring.erl b/tests/verify_dynamic_ring.erl index 
19cfdbda5..368c95a51 100644 --- a/tests/verify_dynamic_ring.erl +++ b/tests/verify_dynamic_ring.erl @@ -39,8 +39,8 @@ confirm() -> %% This assignment for `NewNodes' is commented until riak_core %% issue #570 is resolved %% NewNodes = [ANode, YetAnother, ReplacingNode], - rt:join(AnotherNode, ANode), - rt:join(YetAnother, ANode), + rt_node:join(AnotherNode, ANode), + rt_node:join(YetAnother, ANode), rt_node:wait_until_nodes_agree_about_ownership(Nodes), rt:wait_until_ring_converged(Nodes), rt:wait_until_no_pending_changes(Nodes), @@ -52,14 +52,14 @@ confirm() -> wait_until_extra_proxies_shutdown(Nodes), lager:info("writing 500 keys"), - ?assertEqual([], rt:systest_write(ANode, 1, 500, ?BUCKET, ?W)), + ?assertEqual([], rt_systest:write(ANode, 1, 500, ?BUCKET, ?W)), test_resize(?SHRUNK_SIZE, ?START_SIZE, ANode, Nodes, {501, 750}), lager:info("verifying previously written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 500, ?BUCKET, ?R)), + ?assertEqual([], rt_systest:read(ANode, 1, 500, ?BUCKET, ?R)), test_resize(?START_SIZE, ?EXPANDED_SIZE, ANode, Nodes), lager:info("verifying previously written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), + ?assertEqual([], rt_systest:read(ANode, 1, 750, ?BUCKET, ?R)), %% This following test code for force-replace is commented until %% riak_core issue #570 is resolved. 
At that time the preceding 3 @@ -82,11 +82,11 @@ confirm() -> %% rt:wait_until_no_pending_changes(NewNodes), %% assert_ring_size(?EXPANDED_SIZE, NewNodes), %% lager:info("verifying written data"), - %% ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), + %% ?assertEqual([], rt_systest:read(ANode, 1, 750, ?BUCKET, ?R)), test_resize(?EXPANDED_SIZE, ?SHRUNK_SIZE, ANode, NewNodes), lager:info("verifying written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), + ?assertEqual([], rt_systest:read(ANode, 1, 750, ?BUCKET, ?R)), wait_until_extra_vnodes_shutdown(NewNodes), wait_until_extra_proxies_shutdown(NewNodes), @@ -110,7 +110,7 @@ confirm() -> rt:wait_until_ring_converged(NewNodes), assert_ring_size(?SHRUNK_SIZE, NewNodes), lager:info("verifying written data"), - ?assertEqual([], rt:systest_read(ANode, 1, 750, ?BUCKET, ?R)), + ?assertEqual([], rt_systest:read(ANode, 1, 750, ?BUCKET, ?R)), pass. @@ -137,7 +137,7 @@ write_during_resize(_, Start, End) when Start =:= undefined orelse End =:= undef write_during_resize(Node, Start, End) -> Pid = self(), spawn(fun() -> - case rt:systest_write(Node, Start, End, ?BUCKET, ?W) of + case rt_systest:write(Node, Start, End, ?BUCKET, ?W) of [] -> Pid ! 
done_writing; Ers -> @@ -151,7 +151,7 @@ verify_write_during_resize(Node, Start, End) -> receive done_writing -> lager:info("verifying data written during operation"), - ?assertEqual([], rt:systest_read(Node, Start, End, ?BUCKET, ?R)), + ?assertEqual([], rt_systest:read(Node, Start, End, ?BUCKET, ?R)), ok; {errors_writing, Ers} -> lager:error("errors were encountered while writing during operation: ~p", [Ers]), diff --git a/tests/verify_handoff.erl b/tests/verify_handoff.erl index 7e7e63039..4843b4404 100644 --- a/tests/verify_handoff.erl +++ b/tests/verify_handoff.erl @@ -82,11 +82,11 @@ run_test(TestMode, NTestItems, NTestNodes, HandoffEncoding) -> end, lager:info("Populating root node."), - rt:systest_write(RootNode, NTestItems), + rt_systest:write(RootNode, NTestItems), %% write one object with a bucket type rt_bucket_types:create_and_activate_bucket_type(RootNode, <<"type">>, []), %% allow cluster metadata some time to propogate - rt:systest_write(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), + rt_systest:write(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), %% Test handoff on each node: lager:info("Testing handoff for cluster."), @@ -107,16 +107,16 @@ test_handoff(RootNode, NewNode, NTestItems) -> rt:wait_for_service(NewNode, riak_kv), lager:info("Joining new node with cluster."), - rt:join(NewNode, RootNode), + rt_node:join(NewNode, RootNode), ?assertEqual(ok, rt_node:wait_until_nodes_ready([RootNode, NewNode])), rt:wait_until_no_pending_changes([RootNode, NewNode]), %% See if we get the same data back from the joined node that we added to the root node. 
%% Note: systest_read() returns /non-matching/ items, so getting nothing back is good: lager:info("Validating data after handoff:"), - Results = rt:systest_read(NewNode, NTestItems), + Results = rt_systest:read(NewNode, NTestItems), ?assertEqual(0, length(Results)), - Results2 = rt:systest_read(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), + Results2 = rt_systest:read(RootNode, 1, 2, {<<"type">>, <<"bucket">>}, 2), ?assertEqual(0, length(Results2)), lager:info("Data looks ok."). diff --git a/tests/verify_handoff_mixed.erl b/tests/verify_handoff_mixed.erl index 0938a122e..5edf40083 100644 --- a/tests/verify_handoff_mixed.erl +++ b/tests/verify_handoff_mixed.erl @@ -67,7 +67,7 @@ confirm() -> OldFold = rt:capability(Old, ?FOLD_CAPABILITY, v1), %% now link the nodes together and wait for handoff to complete - ok = rt:join(Old, Current), + ok = rt_node:join(Old, Current), ok = rt:wait_until_all_members(Nodes), ok = rt:wait_until_ring_converged(Nodes), diff --git a/tests/verify_listkeys.erl b/tests/verify_listkeys.erl index d4edd58e5..8ef477eb9 100644 --- a/tests/verify_listkeys.erl +++ b/tests/verify_listkeys.erl @@ -42,7 +42,7 @@ confirm() -> lists:foldl(fun(Node, [N1|_] = Cluster) -> lager:info("An invitation to this party is cordially extended to ~p.", [Node]), - rt:join(Node, N1), + rt_node:join(Node, N1), lager:info("Wait until there are no pending changes"), Ns = lists:usort([Node|Cluster]), rt:wait_until_no_pending_changes(Ns), diff --git a/tests/verify_membackend.erl b/tests/verify_membackend.erl index ded71c846..894d0b8cb 100644 --- a/tests/verify_membackend.erl +++ b/tests/verify_membackend.erl @@ -46,7 +46,7 @@ max_memory(Mode) -> Conf = mkconf(max_memory, Mode), [NodeA, NodeB] = rt_cluster:deploy_nodes(2, Conf), - rt:join(NodeB, NodeA), + rt_node:join(NodeB, NodeA), ?assertEqual(ok, check_put_delete(NodeA)), @@ -81,10 +81,10 @@ combo(Mode) -> check_leave_and_expiry(NodeA, NodeB) -> - ?assertEqual([], rt:systest_write(NodeB, 1, 100, ?BUCKET, 2)), - 
?assertEqual([], rt:systest_read(NodeB, 1, 100, ?BUCKET, 2)), + ?assertEqual([], rt_systest:write(NodeB, 1, 100, ?BUCKET, 2)), + ?assertEqual([], rt_systest:read(NodeB, 1, 100, ?BUCKET, 2)), - rt:join(NodeB, NodeA), + rt_node:join(NodeB, NodeA), ?assertEqual(ok, rt_node:wait_until_nodes_ready([NodeA, NodeB])), rt:wait_until_no_pending_changes([NodeA, NodeB]), @@ -92,14 +92,14 @@ check_leave_and_expiry(NodeA, NodeB) -> rt_node:leave(NodeB), rt:wait_until_unpingable(NodeB), - ?assertEqual([], rt:systest_read(NodeA, 1, 100, ?BUCKET, 2)), + ?assertEqual([], rt_systest:read(NodeA, 1, 100, ?BUCKET, 2)), lager:info("waiting for keys to expire"), timer:sleep(timer:seconds(210)), - _ = rt:systest_read(NodeA, 1, 100, ?BUCKET, 2), + _ = rt_systest:read(NodeA, 1, 100, ?BUCKET, 2), timer:sleep(timer:seconds(5)), - Res = rt:systest_read(NodeA, 1, 100, ?BUCKET, 2), + Res = rt_systest:read(NodeA, 1, 100, ?BUCKET, 2), ?assertEqual(100, length(Res)), ok. @@ -110,9 +110,9 @@ check_eviction(Node) -> Size = 20000 * 8, Val = <<0:Size>>, - ?assertEqual([], rt:systest_write(Node, 1, 500, ?BUCKET, 2, Val)), + ?assertEqual([], rt_systest:write(Node, 1, 500, ?BUCKET, 2, Val)), - Res = length(rt:systest_read(Node, 1, 100, ?BUCKET, 2, Val)), + Res = length(rt_systest:read(Node, 1, 100, ?BUCKET, 2, Val)), %% this is a wider range than I'd like but the final outcome is %% somewhat hard to predict. 
Just trying to verify that some diff --git a/tests/verify_mr_prereduce_node_down.erl b/tests/verify_mr_prereduce_node_down.erl index 7df0ae261..d232946fe 100644 --- a/tests/verify_mr_prereduce_node_down.erl +++ b/tests/verify_mr_prereduce_node_down.erl @@ -53,7 +53,7 @@ confirm() -> Bucket = <<"verify_mr_prereduce_node_down">>, ObjCount = 100, lager:info("Loading ~b objects of test data", [ObjCount]), - [] = rt:systest_write(Primary, 1, ObjCount, Bucket, 3), + [] = rt_systest:write(Primary, 1, ObjCount, Bucket, 3), %% run the query a bunch C = rt_pb:pbc(Primary), diff --git a/tests/verify_riak_object_reformat.erl b/tests/verify_riak_object_reformat.erl index bdb9a9c6d..7e7a29bd7 100644 --- a/tests/verify_riak_object_reformat.erl +++ b/tests/verify_riak_object_reformat.erl @@ -39,8 +39,8 @@ confirm() -> [rt:wait_until_capability(N, {riak_kv, object_format}, v1, v0) || N <- Nodes], lager:info("Writing 100 keys in format v1 to ~p", [Node1]), - rt:systest_write(Node1, 100, ?N), - ?assertEqual([], rt:systest_read(Node1, 100, ?N)), + rt_systest:write(Node1, 100, ?N), + ?assertEqual([], rt_systest:read(Node1, 100, ?N)), lager:info("100 keys successfully written to ~p", [Node1]), %% TODO: introduce some handoff @@ -51,7 +51,7 @@ confirm() -> rt:upgrade(Node, DowngradeVsn), %% use upgrade to downgrade rt:wait_for_service(Node, riak_kv), lager:info("Ensuring keys still readable on ~p", [Node]), - ?assertEqual([], rt:systest_read(Node, 100, ?N)) + ?assertEqual([], rt_systest:read(Node, 100, ?N)) end || Node <- Nodes], pass. 
diff --git a/tests/verify_riak_stats.erl b/tests/verify_riak_stats.erl index d05083bf3..4367d3c1b 100644 --- a/tests/verify_riak_stats.erl +++ b/tests/verify_riak_stats.erl @@ -85,7 +85,7 @@ confirm() -> Stats3 = get_stats(Node1), - rt:systest_write(Node1, 1), + rt_systest:write(Node1, 1), %% make sure the stats that were supposed to increment did verify_inc(Stats2, Stats3, [{<<"pbc_connects_total">>, 1}, {<<"pbc_connects">>, 1}, diff --git a/tests/verify_secondary_index_reformat.erl b/tests/verify_secondary_index_reformat.erl index 9ea33f1a2..84b86b349 100644 --- a/tests/verify_secondary_index_reformat.erl +++ b/tests/verify_secondary_index_reformat.erl @@ -70,7 +70,7 @@ confirm() -> %% write some more data (make sure flag doesn't "roll back" on restart lager:info("writing some more data"), - rt:systest_write(Node, 10, 1), + rt_systest:write(Node, 10, 1), lager:info("restarting node"), rt_node:stop_and_wait(Node), diff --git a/tests/verify_snmp.erl b/tests/verify_snmp.erl index 0385fccb6..4b92051d0 100644 --- a/tests/verify_snmp.erl +++ b/tests/verify_snmp.erl @@ -59,8 +59,8 @@ confirm() -> lager:info("Doing some reads and writes to record some stats."), - rt:systest_write(Node1, 10), - rt:systest_read(Node1, 10), + rt_systest:write(Node1, 10), + rt_systest:read(Node1, 10), lager:info("Waiting for HTTP Stats to be non-zero"), ?assertEqual(ok, diff --git a/tests/verify_tick_change.erl b/tests/verify_tick_change.erl index 32f6d9040..755e766d2 100644 --- a/tests/verify_tick_change.erl +++ b/tests/verify_tick_change.erl @@ -63,11 +63,11 @@ make_common() -> write_stuff(Nodes, Start, End, Bucket, W, Common) -> Nd = lists:nth(length(Nodes), Nodes), - [] = rt:systest_write(Nd, Start, End, Bucket, W, Common). + [] = rt_systest:write(Nd, Start, End, Bucket, W, Common). read_stuff(Nodes, Start, End, Bucket, W, Common) -> Nd = lists:nth(length(Nodes), Nodes), - [] = rt:systest_read(Nd, Start, End, Bucket, W, Common). + [] = rt_systest:read(Nd, Start, End, Bucket, W, Common). 
is_set_net_ticktime_done(Nodes, Time) -> case lists:usort([(catch rpc:call(Node, net_kernel, get_net_ticktime,[])) From 01a132bd428d75969345a1e7cf060056f10fb6d8 Mon Sep 17 00:00:00 2001 From: Jon Anderson Date: Thu, 31 Jul 2014 14:34:23 -0400 Subject: [PATCH 17/17] Fix calls to rt:teardown; should be rt_cluster:teardown. --- src/riak_test_escript.erl | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/riak_test_escript.erl b/src/riak_test_escript.erl index 5bb67cdbc..ddc220c1b 100644 --- a/src/riak_test_escript.erl +++ b/src/riak_test_escript.erl @@ -293,7 +293,7 @@ run_test(Test, Outdir, TestMetaData, Report, HarnessArgs, NumTests) -> CoverDir = rt_config:get(cover_output, "coverage"), case NumTests of 1 -> keep_them_up; - _ -> rt:teardown() + _ -> rt_cluster:teardown() end, CoverageFile = rt_cover:maybe_export_coverage(Test, CoverDir, erlang:phash2(TestMetaData)), case Report of @@ -422,8 +422,8 @@ so_kill_riak_maybe() -> io:format("Would you like to leave Riak running in order to debug?~n"), Input = io:get_chars("[Y/n] ", 1), case Input of - "n" -> rt:teardown(); - "N" -> rt:teardown(); + "n" -> rt_cluster:teardown(); + "N" -> rt_cluster:teardown(); _ -> io:format("Leaving Riak Up... "), rt:whats_up()