diff --git a/.github/workflows/templates/test.template.yaml b/.github/workflows/templates/test.template.yaml index c93ab278192c..bbb361518df6 100644 --- a/.github/workflows/templates/test.template.yaml +++ b/.github/workflows/templates/test.template.yaml @@ -22,8 +22,8 @@ name: Test on: push: branches: - - main - - v4.0.x +#! - main +#! - v4.0.x - v3.13.x - v3.12.x - v3.11.x @@ -42,7 +42,7 @@ on: - '*.bzl' - '*.bazel' - .github/workflows/test.yaml - pull_request: +#! pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/test-make-target.yaml b/.github/workflows/test-make-target.yaml new file mode 100644 index 000000000000..8b57eb5044b0 --- /dev/null +++ b/.github/workflows/test-make-target.yaml @@ -0,0 +1,110 @@ +name: Test target (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string + metadata_store: + required: true + type: string + mixed_clusters: + required: false + default: false + type: boolean + make_target: + required: true + type: string + plugin: + required: true + type: string +jobs: + test: + name: ${{ inputs.plugin }} (${{ inputs.make_target }}) + runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + + - name: FETCH TAGS + run: git fetch --tags + + - name: SETUP OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ inputs.erlang_version }} + elixir-version: ${{ inputs.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + # This currently only applies to Elixir; and can be safely + # restricted to the build jobs to avoid duplication in output. 
+ disable_problem_matchers: true + + - name: MIXED CLUSTERS - FETCH SIGNING KEYS + uses: dsaltares/fetch-gh-release-asset@master + if: inputs.mixed_clusters + with: + repo: rabbitmq/signing-keys + file: rabbitmq-release-signing-key.asc + + - name: MIXED CLUSTERS - FETCH PREVIOUS VERSION + id: fetch_secondary_dist + uses: dsaltares/fetch-gh-release-asset@master + if: inputs.mixed_clusters + with: + regex: true + file: "rabbitmq-server-generic-unix-[\\d.]*\\.tar.xz" + target: ./ + + - name: MIXED CLUSTERS - SETUP SECONDARY_DIST + if: inputs.mixed_clusters + run: | + gpg --import rabbitmq-release-signing-key.asc + gpg --verify rabbitmq-server-generic-unix-*.asc rabbitmq-server-generic-unix-*.tar.xz + tar xf rabbitmq-server-generic-unix-*.tar.xz + + echo "SECONDARY_DIST=${GITHUB_WORKSPACE}/rabbitmq_server-`echo -n ${{ steps.fetch_secondary_dist.outputs.version }} | sed s/v//`" >> $GITHUB_ENV + + - name: SETUP DOTNET (rabbit) + uses: actions/setup-dotnet@v4 + if: inputs.plugin == 'rabbit' + with: + dotnet-version: '3.1.x' + + - name: SETUP SLAPD (rabbitmq_auth_backend_ldap) + if: inputs.plugin == 'rabbitmq_auth_backend_ldap' + run: | + sudo apt-get update && \ + sudo apt-get install -y \ + apparmor-utils \ + ldap-utils \ + slapd + + sudo aa-complain `which slapd` + + - name: RUN TESTS + if: inputs.plugin != 'rabbitmq_cli' + run: | + make -C deps/${{ inputs.plugin }} ${{ inputs.make_target }} RABBITMQ_METADATA_STORE=${{ inputs.metadata_store }} + + # rabbitmq_cli needs a correct broker version for two of its tests. + # But setting PROJECT_VERSION makes other plugins fail. 
+ - name: RUN TESTS (rabbitmq_cli) + if: inputs.plugin == 'rabbitmq_cli' + run: | + make -C deps/${{ inputs.plugin }} ${{ inputs.make_target }} RABBITMQ_METADATA_STORE=${{ inputs.metadata_store }} PROJECT_VERSION="4.1.0" + + - name: UPLOAD TEST LOGS + if: always() + uses: actions/upload-artifact@v4 + with: + name: CT logs (${{ inputs.plugin }} ${{ inputs.make_target }} OTP-${{ inputs.erlang_version }} ${{ inputs.metadata_store }}${{ inputs.mixed_clusters && ' mixed' || '' }}) + path: | + logs/ +# !logs/**/log_private + if-no-files-found: ignore diff --git a/.github/workflows/test-make-tests.yaml b/.github/workflows/test-make-tests.yaml new file mode 100644 index 000000000000..5fa4c6e43d48 --- /dev/null +++ b/.github/workflows/test-make-tests.yaml @@ -0,0 +1,121 @@ +name: Run tests (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string + metadata_store: + required: true + type: string + mixed_clusters: + required: true + type: boolean +jobs: + test-rabbit: + name: Test rabbit + strategy: + fail-fast: false + matrix: + make_target: + - parallel-ct-set-1 + - parallel-ct-set-2 + - parallel-ct-set-3 + - parallel-ct-set-4 + - ct-clustering_management + - eunit ct-dead_lettering + - ct-feature_flags + - ct-metadata_store_clustering + - ct-quorum_queue + - ct-rabbit_stream_queue + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + mixed_clusters: ${{ inputs.mixed_clusters }} + make_target: ${{ matrix.make_target }} + plugin: rabbit + + test-rabbitmq-mqtt: + name: Test rabbitmq_mqtt + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + mixed_clusters: ${{ inputs.mixed_clusters }} + make_target: 
parallel-ct-set-1 + plugin: rabbitmq_mqtt + + # The integration_SUITE requires secrets and + # is therefore run from a separate workflow. + test-rabbitmq-peer-discovery-aws: + name: Test rabbitmq_peer_discovery_aws (partially) + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + mixed_clusters: ${{ inputs.mixed_clusters }} + make_target: ct-config_schema ct-unit + plugin: rabbitmq_peer_discovery_aws + + test-plugin: + name: Test plugins + strategy: + fail-fast: false + matrix: + plugin: + - amqp10_client + - amqp10_common + - amqp_client + - oauth2_client + - rabbit_common + - rabbitmq_amqp_client + - rabbitmq_auth_backend_cache + - rabbitmq_auth_backend_http + - rabbitmq_auth_backend_ldap + - rabbitmq_auth_backend_oauth2 + - rabbitmq_auth_mechanism_ssl + - rabbitmq_aws + - rabbitmq_cli + - rabbitmq_consistent_hash_exchange + - rabbitmq_event_exchange + - rabbitmq_federation + - rabbitmq_federation_management + - rabbitmq_federation_prometheus + - rabbitmq_jms_topic_exchange + - rabbitmq_management + - rabbitmq_management_agent + - rabbitmq_peer_discovery_common + - rabbitmq_peer_discovery_consul + - rabbitmq_peer_discovery_etcd + - rabbitmq_peer_discovery_k8s + - rabbitmq_prelaunch + - rabbitmq_prometheus + - rabbitmq_recent_history_exchange + - rabbitmq_sharding + - rabbitmq_shovel + - rabbitmq_shovel_management + - rabbitmq_shovel_prometheus + - rabbitmq_stomp + - rabbitmq_stream + - rabbitmq_stream_common + - rabbitmq_stream_management + - rabbitmq_tracing + - rabbitmq_trust_store + - rabbitmq_web_dispatch + - rabbitmq_web_mqtt + - rabbitmq_web_stomp + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: ${{ inputs.metadata_store }} + mixed_clusters: ${{ inputs.mixed_clusters }} + make_target: tests + 
plugin: ${{ matrix.plugin }} diff --git a/.github/workflows/test-make-type-check.yaml b/.github/workflows/test-make-type-check.yaml new file mode 100644 index 000000000000..bf977874aff9 --- /dev/null +++ b/.github/workflows/test-make-type-check.yaml @@ -0,0 +1,82 @@ +name: Type check (make) +on: + workflow_call: + inputs: + erlang_version: + required: true + type: string + elixir_version: + required: true + type: string +jobs: + type-check-plugin: + name: Type check plugins + strategy: + fail-fast: false + matrix: + plugin: + # These are using plugin-specific test jobs. + - rabbit + - rabbitmq_mqtt + - rabbitmq_peer_discovery_aws + # These are from the test-plugin test job. + - amqp10_client + - amqp10_common + - amqp_client + - oauth2_client + - rabbit_common + - rabbitmq_amqp_client + - rabbitmq_auth_backend_cache + - rabbitmq_auth_backend_http + - rabbitmq_auth_backend_ldap + - rabbitmq_auth_backend_oauth2 + - rabbitmq_auth_mechanism_ssl + - rabbitmq_aws + - rabbitmq_consistent_hash_exchange + - rabbitmq_event_exchange + - rabbitmq_federation + - rabbitmq_federation_management + - rabbitmq_federation_prometheus + - rabbitmq_jms_topic_exchange + - rabbitmq_management + - rabbitmq_management_agent + - rabbitmq_peer_discovery_common + - rabbitmq_peer_discovery_consul + # @todo We are getting errors because of wrong types + # in the eetcd dep. But upgrading requires using gun 2.0, + # which we can't because another app's dep, emqtt, requires + # gun 1.3.x. So for now we don't type check this plugin. 
+ #- rabbitmq_peer_discovery_etcd + - rabbitmq_peer_discovery_k8s + - rabbitmq_prelaunch + - rabbitmq_prometheus + - rabbitmq_recent_history_exchange + - rabbitmq_sharding + - rabbitmq_shovel + - rabbitmq_shovel_management + - rabbitmq_shovel_prometheus + - rabbitmq_stomp + - rabbitmq_stream + - rabbitmq_stream_common + - rabbitmq_stream_management + - rabbitmq_tracing + - rabbitmq_trust_store + - rabbitmq_web_dispatch + - rabbitmq_web_mqtt + - rabbitmq_web_stomp + # This one we do not want to run tests so no corresponding test job. + - rabbitmq_ct_helpers + # These do not have tests at this time so no corresponding test job. + - rabbitmq_ct_client_helpers + - rabbitmq_random_exchange + - rabbitmq_top + - rabbitmq_web_mqtt_examples + - rabbitmq_web_stomp_examples + - trust_store_http + uses: ./.github/workflows/test-make-target.yaml + with: + erlang_version: ${{ inputs.erlang_version }} + elixir_version: ${{ inputs.elixir_version }} + metadata_store: khepri # Not actually used. + make_target: dialyze + plugin: ${{ matrix.plugin }} diff --git a/.github/workflows/test-make.yaml b/.github/workflows/test-make.yaml new file mode 100644 index 000000000000..60d61dad9fc4 --- /dev/null +++ b/.github/workflows/test-make.yaml @@ -0,0 +1,112 @@ +name: Test (make) +on: + push: + branches: + - main + - v4.0.x + paths: + - deps/** + - scripts/** + - Makefile + - plugins.mk + - rabbitmq-components.mk + - .github/workflows/test-make.yaml + pull_request: +concurrency: + group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} + cancel-in-progress: true +jobs: + build-and-xref: + name: Build and Xref + strategy: + fail-fast: false + matrix: + erlang_version: + - '26' + - '27' + elixir_version: + - '1.17' + # @todo Add macOS and Windows. 
+ runs-on: ubuntu-latest + timeout-minutes: 30 + steps: + - name: CHECKOUT REPOSITORY + uses: actions/checkout@v4 + + - name: FETCH TAGS + run: git fetch --tags + + - name: SETUP OTP & ELIXIR + uses: erlef/setup-beam@v1.17 + with: + otp-version: ${{ matrix.erlang_version }} + elixir-version: ${{ matrix.elixir_version }} + hexpm-mirrors: | + https://builds.hex.pm + https://cdn.jsdelivr.net/hex + + - name: SANITY CHECK (rabbit) + run: make -C deps/rabbit parallel-ct-sanity-check + + - name: SANITY CHECK (rabbitmq_mqtt) + run: make -C deps/rabbitmq_mqtt parallel-ct-sanity-check + + - name: BUILD + run: make + + - name: XREF + run: make xref + + test: + name: Test + strategy: + fail-fast: false + matrix: + erlang_version: + - '26' +# - '27' + elixir_version: + - '1.17' + metadata_store: + - mnesia + - khepri + uses: ./.github/workflows/test-make-tests.yaml + with: + erlang_version: ${{ matrix.erlang_version }} + elixir_version: ${{ matrix.elixir_version }} + metadata_store: ${{ matrix.metadata_store }} + mixed_clusters: false + + test-mixed-clusters: + name: Test mixed clusters + strategy: + fail-fast: false + matrix: + erlang_version: + - '26' +# - '27' + elixir_version: + - '1.17' + metadata_store: + - mnesia + - khepri + uses: ./.github/workflows/test-make-tests.yaml + with: + erlang_version: ${{ matrix.erlang_version }} + elixir_version: ${{ matrix.elixir_version }} + metadata_store: ${{ matrix.metadata_store }} + mixed_clusters: true + + type-check: + name: Type check + strategy: + fail-fast: false + matrix: + erlang_version: # Latest OTP + - '27' + elixir_version: # Latest Elixir + - '1.17' + uses: ./.github/workflows/test-make-type-check.yaml + with: + erlang_version: ${{ matrix.erlang_version }} + elixir_version: ${{ matrix.elixir_version }} diff --git a/.github/workflows/test-mixed-versions.yaml b/.github/workflows/test-mixed-versions.yaml index 14b2b5b67d3c..bf2ed9ae2fdb 100644 --- a/.github/workflows/test-mixed-versions.yaml +++ 
b/.github/workflows/test-mixed-versions.yaml @@ -2,8 +2,6 @@ name: Test Mixed Version Clusters on: push: branches: - - main - - v4.0.x - v3.13.x - bump-otp-* - bump-elixir-* @@ -21,7 +19,6 @@ on: - '*.bzl' - '*.bazel' - .github/workflows/test-mixed-versions.yaml - pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 85c8e89a7bd1..6c3c003670ef 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -2,8 +2,6 @@ name: Test on: push: branches: - - main - - v4.0.x - v3.13.x - v3.12.x - v3.11.x @@ -22,7 +20,6 @@ on: - '*.bzl' - '*.bazel' - .github/workflows/test.yaml - pull_request: concurrency: group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }} cancel-in-progress: true diff --git a/Makefile b/Makefile index 40e234a05e82..2e4fe88c9f7a 100644 --- a/Makefile +++ b/Makefile @@ -24,8 +24,7 @@ ADDITIONAL_PLUGINS ?= DEPS = rabbit_common rabbit $(PLUGINS) $(ADDITIONAL_PLUGINS) DEP_PLUGINS = rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-tools.mk + rabbit_common/mk/rabbitmq-run.mk DISABLE_DISTCLEAN = 1 @@ -61,6 +60,20 @@ include rabbitmq-components.mk # multiple times (including for release file names and whatnot). PROJECT_VERSION := $(PROJECT_VERSION) +# Fetch/build community plugins. +# +# To include community plugins in commands, use +# `make COMMUNITY_PLUGINS=1` or export the variable. +# They are not included otherwise. Note that only +# the top-level Makefile can do this. +# +# Note that the community plugins will be fetched using +# SSH and therefore may be subject to GH authentication. 
+ +ifdef COMMUNITY_PLUGINS +DEPS += $(RABBITMQ_COMMUNITY) +endif + include erlang.mk include mk/github-actions.mk include mk/bazel.mk diff --git a/deps/amqp10_client/Makefile b/deps/amqp10_client/Makefile index 36c117c78ea1..ceb96f382525 100644 --- a/deps/amqp10_client/Makefile +++ b/deps/amqp10_client/Makefile @@ -33,13 +33,11 @@ DEPS = amqp10_common credentials_obfuscation TEST_DEPS = rabbit rabbitmq_ct_helpers LOCAL_DEPS = ssl inets crypto public_key -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk DEP_PLUGINS += elvis_mk dep_elvis_mk = git https://github.com/inaka/elvis.mk.git master diff --git a/deps/amqp10_common/Makefile b/deps/amqp10_common/Makefile index 6d1b124b817b..6208fecad785 100644 --- a/deps/amqp10_common/Makefile +++ b/deps/amqp10_common/Makefile @@ -24,7 +24,7 @@ define HEX_TARBALL_EXTRA_METADATA } endef -DIALYZER_OPTS += --src -r test -DTEST +#DIALYZER_OPTS += --src -r test -DTEST BUILD_DEPS = rabbit_common TEST_DEPS = rabbitmq_ct_helpers proper @@ -38,12 +38,10 @@ TEST_DEPS = rabbitmq_ct_helpers proper -include development.pre.mk -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk + rabbit_common/mk/rabbitmq-hexpm.mk PLT_APPS = eunit diff --git a/deps/amqp_client/Makefile b/deps/amqp_client/Makefile index c873f300e553..43dbb62901ad 100644 --- a/deps/amqp_client/Makefile +++ b/deps/amqp_client/Makefile @@ -43,13 +43,11 @@ LOCAL_DEPS = xmerl ssl public_key DEPS = rabbit_common credentials_obfuscation TEST_DEPS = rabbitmq_ct_helpers rabbit meck -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk \ + rabbit_common/mk/rabbitmq-hexpm.mk PLT_APPS = ssl public_key diff --git a/deps/amqp_client/test/system_SUITE.erl b/deps/amqp_client/test/system_SUITE.erl index fe8309ce473a..2ff03e8d20a5 100644 --- a/deps/amqp_client/test/system_SUITE.erl +++ b/deps/amqp_client/test/system_SUITE.erl @@ -335,14 +335,16 @@ safe_call_timeouts_test(Params = #amqp_params_network{}) -> meck:unload(amqp_network_connection); safe_call_timeouts_test(Params = #amqp_params_direct{}) -> + %% We must mock net_kernel:get_net_ticktime/0 as changing + %% the tick time directly could lead to nodes disconnecting. + meck:new(net_kernel, [unstick, passthrough]), + TestCallTimeout = 30000, - NetTicktime0 = net_kernel:get_net_ticktime(), amqp_util:update_call_timeout(TestCallTimeout), %% 1. 
NetTicktime >= DIRECT_OPERATION_TIMEOUT (120s) NetTicktime1 = 140, - net_kernel:set_net_ticktime(NetTicktime1, 1), - wait_until_net_ticktime(NetTicktime1), + meck:expect(net_kernel, get_net_ticktime, fun() -> NetTicktime1 end), {ok, Connection1} = amqp_connection:start(Params), ?assertEqual((NetTicktime1 * 1000) + ?CALL_TIMEOUT_DEVIATION, @@ -356,15 +358,12 @@ safe_call_timeouts_test(Params = #amqp_params_direct{}) -> %% 2. Transitioning NetTicktime >= DIRECT_OPERATION_TIMEOUT (120s) NetTicktime2 = 120, - net_kernel:set_net_ticktime(NetTicktime2, 1), - ?assertEqual({ongoing_change_to, NetTicktime2}, net_kernel:get_net_ticktime()), + meck:expect(net_kernel, get_net_ticktime, fun() -> {ongoing_change_to, NetTicktime2} end), {ok, Connection2} = amqp_connection:start(Params), ?assertEqual((NetTicktime2 * 1000) + ?CALL_TIMEOUT_DEVIATION, amqp_util:call_timeout()), - wait_until_net_ticktime(NetTicktime2), - ?assertEqual(ok, amqp_connection:close(Connection2)), wait_for_death(Connection2), @@ -373,15 +372,14 @@ safe_call_timeouts_test(Params = #amqp_params_direct{}) -> %% 3. NetTicktime < DIRECT_OPERATION_TIMEOUT (120s) NetTicktime3 = 60, - net_kernel:set_net_ticktime(NetTicktime3, 1), - wait_until_net_ticktime(NetTicktime3), + meck:expect(net_kernel, get_net_ticktime, fun() -> NetTicktime3 end), {ok, Connection3} = amqp_connection:start(Params), ?assertEqual((?DIRECT_OPERATION_TIMEOUT + ?CALL_TIMEOUT_DEVIATION), amqp_util:call_timeout()), - net_kernel:set_net_ticktime(NetTicktime0, 1), - wait_until_net_ticktime(NetTicktime0), + meck:unload(net_kernel), + ?assertEqual(ok, amqp_connection:close(Connection3)), wait_for_death(Connection3), @@ -1578,16 +1576,6 @@ assert_down_with_error(MonitorRef, CodeAtom) -> exit(did_not_die) end. 
-wait_until_net_ticktime(NetTicktime) -> - case net_kernel:get_net_ticktime() of - NetTicktime -> ok; - {ongoing_change_to, NetTicktime} -> - timer:sleep(1000), - wait_until_net_ticktime(NetTicktime); - _ -> - throw({error, {net_ticktime_not_set, NetTicktime}}) - end. - set_resource_alarm(Resource, Config) when Resource =:= memory orelse Resource =:= disk -> SrcDir = ?config(amqp_client_srcdir, Config), diff --git a/deps/oauth2_client/Makefile b/deps/oauth2_client/Makefile index 2acf3a7c2d0d..6dcf2cbaf7c6 100644 --- a/deps/oauth2_client/Makefile +++ b/deps/oauth2_client/Makefile @@ -9,13 +9,8 @@ LOCAL_DEPS = ssl inets crypto public_key PLT_APPS = rabbit -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include rabbitmq-components.mk include erlang.mk diff --git a/deps/rabbit/.gitignore b/deps/rabbit/.gitignore index 7f6246dc7b9e..9e124a080135 100644 --- a/deps/rabbit/.gitignore +++ b/deps/rabbit/.gitignore @@ -2,7 +2,5 @@ /etc/ /test/config_schema_SUITE_data/schema/** -rabbit-rabbitmq-deps.mk - [Bb]in/ [Oo]bj/ diff --git a/deps/rabbit/BUILD.bazel b/deps/rabbit/BUILD.bazel index b240b65f8588..e705119b2ca9 100644 --- a/deps/rabbit/BUILD.bazel +++ b/deps/rabbit/BUILD.bazel @@ -459,22 +459,6 @@ rabbitmq_integration_suite( size = "large", ) -rabbitmq_integration_suite( - name = "feature_flags_with_unpriveleged_user_SUITE", - size = "large", - additional_beam = [ - ":feature_flags_SUITE_beam_files", - ], - flaky = True, - shard_count = 2, - # The enabling_* tests chmod files and then expect writes to be blocked. - # This probably doesn't work because we are root in the remote docker image. 
- tags = ["no-remote-exec"], - runtime_deps = [ - "//deps/rabbit/test/feature_flags_SUITE_data/my_plugin:erlang_app", - ], -) - rabbitmq_integration_suite( name = "list_consumers_sanity_check_SUITE", size = "medium", diff --git a/deps/rabbit/Makefile b/deps/rabbit/Makefile index dc77070a533e..caae4f48da4b 100644 --- a/deps/rabbit/Makefile +++ b/deps/rabbit/Makefile @@ -134,8 +134,6 @@ TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers meck proper amqp_clie PLT_APPS += mnesia runtime_tools dep_syslog = git https://github.com/schlagert/syslog 4.0.0 -dep_osiris = git https://github.com/rabbitmq/osiris v1.8.3 -dep_systemd = hex 0.6.1 define usage_xml_to_erl $(subst __,_,$(patsubst $(DOCS_DIR)/rabbitmq%.1.xml, src/rabbit_%_usage.erl, $(subst -,_,$(1)))) @@ -146,12 +144,8 @@ MANPAGES = $(wildcard $(DOCS_DIR)/*.[0-9]) WEB_MANPAGES = $(patsubst %,%.html,$(MANPAGES)) MD_MANPAGES = $(patsubst %,%.md,$(MANPAGES)) -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-test.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include ../../erlang.mk @@ -214,8 +208,129 @@ SLOW_CT_SUITES := backing_queue \ vhost FAST_CT_SUITES := $(filter-out $(sort $(SLOW_CT_SUITES)),$(CT_SUITES)) -ct-fast: CT_SUITES = $(FAST_CT_SUITES) -ct-slow: CT_SUITES = $(SLOW_CT_SUITES) +ct-fast: + $(MAKE) ct CT_SUITES='$(FAST_CT_SUITES)' + +ct-slow: + $(MAKE) ct CT_SUITES='$(SLOW_CT_SUITES)' + +CT_OPTS += -ct_hooks rabbit_ct_hook [] + +# Parallel CT. 
+# +# @todo We must ensure that the CT_OPTS also apply to ct-master +# @todo We should probably refactor ct_master.erl to have node init in a separate .erl + +define ct_master.erl + StartOpts = #{ + host => "localhost", + connection => standard_io, + args => ["-hidden"] + }, + {ok, Pid1, _} = peer:start(StartOpts#{name => "rabbit_shard1"}), + {ok, Pid2, _} = peer:start(StartOpts#{name => "rabbit_shard2"}), + {ok, Pid3, _} = peer:start(StartOpts#{name => "rabbit_shard3"}), + {ok, Pid4, _} = peer:start(StartOpts#{name => "rabbit_shard4"}), + peer:call(Pid1, net_kernel, set_net_ticktime, [5]), + peer:call(Pid2, net_kernel, set_net_ticktime, [5]), + peer:call(Pid3, net_kernel, set_net_ticktime, [5]), + peer:call(Pid4, net_kernel, set_net_ticktime, [5]), + peer:call(Pid1, persistent_term, put, [rabbit_ct_tcp_port_base, 23000]), + peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), + peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), + peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), + [{[_], {ok, Results}}] = ct_master_fork:run("$1"), + peer:stop(Pid4), + peer:stop(Pid3), + peer:stop(Pid2), + peer:stop(Pid1), + lists:foldl(fun + ({_, {_, 0, {_, 0}}}, Err) -> Err + 1; + (What, Peer) -> halt(Peer) + end, 1, Results), + halt(0) +endef + +PARALLEL_CT_SET_1_A = amqp_client unit_cluster_formation_locking_mocks unit_cluster_formation_sort_nodes unit_collections unit_config_value_encryption unit_connection_tracking +PARALLEL_CT_SET_1_B = amqp_address amqp_auth amqp_credit_api_v2 amqp_system signal_handling single_active_consumer unit_access_control_authn_authz_context_propagation unit_access_control_credential_validation unit_amqp091_content_framing unit_amqp091_server_properties unit_app_management +PARALLEL_CT_SET_1_C = amqp_proxy_protocol amqpl_consumer_ack amqpl_direct_reply_to backing_queue bindings rabbit_db_maintenance rabbit_db_msup rabbit_db_policy rabbit_db_queue rabbit_db_topic_exchange 
rabbit_direct_reply_to_prop cluster_limit cluster_minority term_to_binary_compat_prop topic_permission transactions unicode unit_access_control +PARALLEL_CT_SET_1_D = amqqueue_backward_compatibility channel_interceptor channel_operation_timeout classic_queue classic_queue_prop config_schema peer_discovery_dns peer_discovery_tmp_hidden_node per_node_limit per_user_connection_channel_limit + +PARALLEL_CT_SET_2_A = cluster confirms_rejects consumer_timeout rabbit_access_control rabbit_confirms rabbit_core_metrics_gc rabbit_cuttlefish rabbit_db_binding rabbit_db_exchange +PARALLEL_CT_SET_2_B = clustering_recovery crashing_queues deprecated_features direct_exchange_routing_v2 disconnect_detected_during_alarm exchanges unit_gen_server2 +PARALLEL_CT_SET_2_C = disk_monitor dynamic_qq unit_disk_monitor unit_file_handle_cache unit_log_management unit_operator_policy +PARALLEL_CT_SET_2_D = queue_length_limits queue_parallel quorum_queue_member_reconciliation rabbit_fifo rabbit_fifo_dlx rabbit_stream_coordinator + +PARALLEL_CT_SET_3_A = definition_import per_user_connection_channel_limit_partitions per_vhost_connection_limit_partitions policy priority_queue_recovery rabbit_fifo_prop rabbit_fifo_v0 rabbit_stream_sac_coordinator unit_credit_flow unit_queue_consumers unit_queue_location unit_quorum_queue +PARALLEL_CT_SET_3_B = cluster_upgrade list_consumers_sanity_check list_queues_online_and_offline logging lqueue maintenance_mode rabbit_fifo_q +PARALLEL_CT_SET_3_C = cli_forget_cluster_node feature_flags_v2 mc_unit message_containers_deaths_v2 message_size_limit metadata_store_migration +PARALLEL_CT_SET_3_D = metadata_store_phase1 metrics mirrored_supervisor msg_store peer_discovery_classic_config proxy_protocol runtime_parameters unit_stats_and_metrics unit_supervisor2 unit_vm_memory_monitor + +PARALLEL_CT_SET_4_A = clustering_events rabbit_local_random_exchange rabbit_message_interceptor rabbitmq_4_0_deprecations unit_pg_local unit_plugin_directories unit_plugin_versioning 
unit_policy_validators unit_priority_queue +PARALLEL_CT_SET_4_B = per_user_connection_tracking per_vhost_connection_limit rabbit_fifo_dlx_integration rabbit_fifo_int +PARALLEL_CT_SET_4_C = per_vhost_msg_store per_vhost_queue_limit priority_queue upgrade_preparation vhost +PARALLEL_CT_SET_4_D = per_user_connection_channel_tracking product_info publisher_confirms_parallel queue_type rabbitmq_queues_cli_integration rabbitmqctl_integration rabbitmqctl_shutdown routing + +PARALLEL_CT_SET_1 = $(sort $(PARALLEL_CT_SET_1_A) $(PARALLEL_CT_SET_1_B) $(PARALLEL_CT_SET_1_C) $(PARALLEL_CT_SET_1_D)) +PARALLEL_CT_SET_2 = $(sort $(PARALLEL_CT_SET_2_A) $(PARALLEL_CT_SET_2_B) $(PARALLEL_CT_SET_2_C) $(PARALLEL_CT_SET_2_D)) +PARALLEL_CT_SET_3 = $(sort $(PARALLEL_CT_SET_3_A) $(PARALLEL_CT_SET_3_B) $(PARALLEL_CT_SET_3_C) $(PARALLEL_CT_SET_3_D)) +PARALLEL_CT_SET_4 = $(sort $(PARALLEL_CT_SET_4_A) $(PARALLEL_CT_SET_4_B) $(PARALLEL_CT_SET_4_C) $(PARALLEL_CT_SET_4_D)) + +SEQUENTIAL_CT_SUITES = clustering_management dead_lettering feature_flags metadata_store_clustering quorum_queue rabbit_stream_queue +PARALLEL_CT_SUITES = $(PARALLEL_CT_SET_1) $(PARALLEL_CT_SET_2) $(PARALLEL_CT_SET_3) $(PARALLEL_CT_SET_4) + +ifeq ($(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES)),) +parallel-ct-sanity-check: + $(verbose) : +else +parallel-ct-sanity-check: + $(verbose) printf "%s\n" \ + "In order for new test suites to be run in CI, the test suites" \ + "must be added to one of the PARALLEL_CT_SET__ variables." \ + "" \ + "The following test suites are missing:" \ + "$(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES))" + $(verbose) exit 1 +endif + +define tpl_parallel_ct_test_spec +{logdir, "$(CT_LOGS_DIR)"}. +{logdir, master, "$(CT_LOGS_DIR)"}. +{create_priv_dir, all_nodes, auto_per_run}. +{auto_compile, false}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. 
+{node, shard4, 'rabbit_shard4@localhost'}. + +{define, 'Set1', [$(call comma_list,$(addsuffix _SUITE,$1))]}. +{define, 'Set2', [$(call comma_list,$(addsuffix _SUITE,$2))]}. +{define, 'Set3', [$(call comma_list,$(addsuffix _SUITE,$3))]}. +{define, 'Set4', [$(call comma_list,$(addsuffix _SUITE,$4))]}. + +{suites, shard1, "test/", 'Set1'}. +{suites, shard2, "test/", 'Set2'}. +{suites, shard3, "test/", 'Set3'}. +{suites, shard4, "test/", 'Set4'}. +endef + +define parallel_ct_set_target +tpl_parallel_ct_test_spec_set_$1 = $$(call tpl_parallel_ct_test_spec,$(PARALLEL_CT_SET_$(1)_A),$(PARALLEL_CT_SET_$(1)_B),$(PARALLEL_CT_SET_$(1)_C),$(PARALLEL_CT_SET_$(1)_D)) + +parallel-ct-set-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(verbose) $$(call core_render,tpl_parallel_ct_test_spec_set_$(1),ct.set-$(1).spec) + $$(call erlang,$$(call ct_master.erl,ct.set-$(1).spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) +endef + +$(foreach set,1 2 3 4,$(eval $(call parallel_ct_set_target,$(set)))) + +# @todo Generate ct.test.spec from Makefile variables instead of hardcoded for ct-master + +parallel-ct: test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(call erlang,$(call ct_master.erl,ct.test.spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) # -------------------------------------------------------------------- # Compilation. 
diff --git a/deps/rabbit/app.bzl b/deps/rabbit/app.bzl index 1f1154010db1..88984b3401fd 100644 --- a/deps/rabbit/app.bzl +++ b/deps/rabbit/app.bzl @@ -994,14 +994,6 @@ def test_suite_beam_files(name = "test_suite_beam_files"): app_name = "rabbit", erlc_opts = "//:test_erlc_opts", ) - erlang_bytecode( - name = "feature_flags_with_unpriveleged_user_SUITE_beam_files", - testonly = True, - srcs = ["test/feature_flags_with_unpriveleged_user_SUITE.erl"], - outs = ["test/feature_flags_with_unpriveleged_user_SUITE.beam"], - app_name = "rabbit", - erlc_opts = "//:test_erlc_opts", - ) erlang_bytecode( name = "list_consumers_sanity_check_SUITE_beam_files", testonly = True, diff --git a/deps/rabbit/ct.test.spec b/deps/rabbit/ct.test.spec new file mode 100644 index 000000000000..6740594c1500 --- /dev/null +++ b/deps/rabbit/ct.test.spec @@ -0,0 +1,185 @@ +{logdir, "logs/"}. +{logdir, master, "logs/"}. +{create_priv_dir, all_nodes, auto_per_run}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. +{node, shard4, 'rabbit_shard4@localhost'}. + +%% +%% Sets of test suites that take around the same time to complete. +%% + +{define, 'Set1', [ + amqp_address_SUITE +, amqp_auth_SUITE +, amqp_client_SUITE +, amqp_credit_api_v2_SUITE +, amqp_proxy_protocol_SUITE +, amqp_system_SUITE +, amqpl_consumer_ack_SUITE +, amqpl_direct_reply_to_SUITE +, amqqueue_backward_compatibility_SUITE +, backing_queue_SUITE +, bindings_SUITE +, channel_interceptor_SUITE +, channel_operation_timeout_SUITE +, classic_queue_SUITE +, classic_queue_prop_SUITE +]}. + +{define, 'Set2', [ + cluster_SUITE +, config_schema_SUITE +, confirms_rejects_SUITE +, consumer_timeout_SUITE +, crashing_queues_SUITE +, deprecated_features_SUITE +, direct_exchange_routing_v2_SUITE +, disconnect_detected_during_alarm_SUITE +, disk_monitor_SUITE +, dynamic_qq_SUITE +, exchanges_SUITE +, rabbit_stream_queue_SUITE +]}. 
+ +{define, 'Set3', [ + cli_forget_cluster_node_SUITE +, feature_flags_SUITE +, feature_flags_v2_SUITE +, list_consumers_sanity_check_SUITE +, list_queues_online_and_offline_SUITE +, logging_SUITE +, lqueue_SUITE +, maintenance_mode_SUITE +, mc_unit_SUITE +, message_containers_deaths_v2_SUITE +, message_size_limit_SUITE +, metadata_store_migration_SUITE +, metadata_store_phase1_SUITE +, metrics_SUITE +, mirrored_supervisor_SUITE +, msg_store_SUITE +, peer_discovery_classic_config_SUITE +]}. + +{define, 'Set4', [ + peer_discovery_dns_SUITE +, peer_discovery_tmp_hidden_node_SUITE +, per_node_limit_SUITE +, per_user_connection_channel_limit_SUITE +, per_user_connection_channel_tracking_SUITE +, per_user_connection_tracking_SUITE +, per_vhost_connection_limit_SUITE +, per_vhost_msg_store_SUITE +, per_vhost_queue_limit_SUITE +, policy_SUITE +, priority_queue_SUITE +, priority_queue_recovery_SUITE +, product_info_SUITE +, proxy_protocol_SUITE +, publisher_confirms_parallel_SUITE +]}. + +{define, 'Set5', [ + clustering_recovery_SUITE +, metadata_store_clustering_SUITE +, queue_length_limits_SUITE +, queue_parallel_SUITE +, quorum_queue_SUITE +, rabbit_access_control_SUITE +, rabbit_confirms_SUITE +, rabbit_core_metrics_gc_SUITE +, rabbit_cuttlefish_SUITE +, rabbit_db_binding_SUITE +, rabbit_db_exchange_SUITE +, rabbit_db_maintenance_SUITE +, rabbit_db_msup_SUITE +, rabbit_db_policy_SUITE +, rabbit_db_queue_SUITE +, rabbit_db_topic_exchange_SUITE +, rabbit_direct_reply_to_prop_SUITE +]}. 
+ +{define, 'Set6', [ + queue_type_SUITE +, quorum_queue_member_reconciliation_SUITE +, rabbit_fifo_SUITE +, rabbit_fifo_dlx_SUITE +, rabbit_fifo_dlx_integration_SUITE +, rabbit_fifo_int_SUITE +, rabbit_fifo_prop_SUITE +, rabbit_fifo_v0_SUITE +, rabbit_local_random_exchange_SUITE +, rabbit_message_interceptor_SUITE +, rabbit_stream_coordinator_SUITE +, rabbit_stream_sac_coordinator_SUITE +, rabbitmq_4_0_deprecations_SUITE +, rabbitmq_queues_cli_integration_SUITE +, rabbitmqctl_integration_SUITE +, rabbitmqctl_shutdown_SUITE +, routing_SUITE +, runtime_parameters_SUITE +]}. + +{define, 'Set7', [ + cluster_limit_SUITE +, cluster_minority_SUITE +, clustering_management_SUITE +, signal_handling_SUITE +, single_active_consumer_SUITE +, term_to_binary_compat_prop_SUITE +, topic_permission_SUITE +, transactions_SUITE +, unicode_SUITE +, unit_access_control_SUITE +, unit_access_control_authn_authz_context_propagation_SUITE +, unit_access_control_credential_validation_SUITE +, unit_amqp091_content_framing_SUITE +, unit_amqp091_server_properties_SUITE +, unit_app_management_SUITE +, unit_cluster_formation_locking_mocks_SUITE +, unit_cluster_formation_sort_nodes_SUITE +, unit_collections_SUITE +, unit_config_value_encryption_SUITE +, unit_connection_tracking_SUITE +]}. + +{define, 'Set8', [ + dead_lettering_SUITE +, definition_import_SUITE +, per_user_connection_channel_limit_partitions_SUITE +, per_vhost_connection_limit_partitions_SUITE +, unit_credit_flow_SUITE +, unit_disk_monitor_SUITE +, unit_file_handle_cache_SUITE +, unit_gen_server2_SUITE +, unit_log_management_SUITE +, unit_operator_policy_SUITE +, unit_pg_local_SUITE +, unit_plugin_directories_SUITE +, unit_plugin_versioning_SUITE +, unit_policy_validators_SUITE +, unit_priority_queue_SUITE +, unit_queue_consumers_SUITE +, unit_queue_location_SUITE +, unit_quorum_queue_SUITE +, unit_stats_and_metrics_SUITE +, unit_supervisor2_SUITE +, unit_vm_memory_monitor_SUITE +, upgrade_preparation_SUITE +, vhost_SUITE +]}. 
+ +{suites, shard1, "test/", 'Set1'}. +{suites, shard1, "test/", 'Set2'}. + +{suites, shard2, "test/", 'Set3'}. +{suites, shard2, "test/", 'Set4'}. + +{suites, shard3, "test/", 'Set5'}. +{suites, shard3, "test/", 'Set6'}. + +{suites, shard4, "test/", 'Set7'}. +{suites, shard4, "test/", 'Set8'}. diff --git a/deps/rabbit/src/rabbit_channel_tracking.erl b/deps/rabbit/src/rabbit_channel_tracking.erl index 0931352416df..bd37d59bf9e8 100644 --- a/deps/rabbit/src/rabbit_channel_tracking.erl +++ b/deps/rabbit/src/rabbit_channel_tracking.erl @@ -126,7 +126,7 @@ unregister_tracked_by_pid(ChPid) when node(ChPid) == node() -> case ets:lookup(?TRACKED_CHANNEL_TABLE, ChPid) of [] -> ok; [#tracked_channel{username = Username}] -> - ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), ets:delete(?TRACKED_CHANNEL_TABLE, ChPid) end. @@ -139,7 +139,7 @@ unregister_tracked(ChId = {Node, _Name}) when Node == node() -> case get_tracked_channel_by_id(ChId) of [] -> ok; [#tracked_channel{pid = ChPid, username = Username}] -> - ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CHANNEL_TABLE_PER_USER, Username, -1), ets:delete(?TRACKED_CHANNEL_TABLE, ChPid) end. 
diff --git a/deps/rabbit/src/rabbit_connection_tracking.erl b/deps/rabbit/src/rabbit_connection_tracking.erl index 220c29016698..0a84a7620711 100644 --- a/deps/rabbit/src/rabbit_connection_tracking.erl +++ b/deps/rabbit/src/rabbit_connection_tracking.erl @@ -151,8 +151,8 @@ unregister_tracked(ConnId = {Node, _Name}) when Node =:= node() -> case ets:lookup(?TRACKED_CONNECTION_TABLE, ConnId) of [] -> ok; [#tracked_connection{vhost = VHost, username = Username}] -> - ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_USER, Username, -1), - ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_VHOST, VHost, -1), + _ = ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_USER, Username, -1), + _ = ets:update_counter(?TRACKED_CONNECTION_TABLE_PER_VHOST, VHost, -1), ets:delete(?TRACKED_CONNECTION_TABLE, ConnId) end. diff --git a/deps/rabbit/src/rabbit_msg_store.erl b/deps/rabbit/src/rabbit_msg_store.erl index a9c25d822bf9..efd8d53a0507 100644 --- a/deps/rabbit/src/rabbit_msg_store.erl +++ b/deps/rabbit/src/rabbit_msg_store.erl @@ -1050,7 +1050,7 @@ internal_sync(State = #msstate { current_file_handle = CurHdl, flying_write(Key, #msstate { flying_ets = FlyingEts }) -> case ets:lookup(FlyingEts, Key) of [{_, ?FLYING_WRITE}] -> - ets:update_counter(FlyingEts, Key, ?FLYING_WRITE_DONE), + _ = ets:update_counter(FlyingEts, Key, ?FLYING_WRITE_DONE), %% We only remove the object if it hasn't changed %% (a remove may be sent while we were processing the write). true = ets:delete_object(FlyingEts, {Key, ?FLYING_IS_WRITTEN}), @@ -1318,7 +1318,7 @@ update_msg_cache(CacheEts, MsgId, Msg) -> %% but without the debug log that we don't want as the update is %% more likely to fail following recent reworkings. 
try - ets:update_counter(CacheEts, MsgId, {3, +1}), + _ = ets:update_counter(CacheEts, MsgId, {3, +1}), ok catch error:badarg -> %% The entry must have been removed between diff --git a/deps/rabbit/src/rabbit_time_travel_dbg.erl b/deps/rabbit/src/rabbit_time_travel_dbg.erl index 4ab6674514de..7d8b480e5ac9 100644 --- a/deps/rabbit/src/rabbit_time_travel_dbg.erl +++ b/deps/rabbit/src/rabbit_time_travel_dbg.erl @@ -28,7 +28,7 @@ start(Pid, Apps) -> TracerPid = spawn_link(?MODULE, init, []), {ok, _} = dbg:tracer(process, {fun (Msg, _) -> TracerPid ! Msg end, []}), _ = [dbg:tpl(M, []) || M <- Mods], - dbg:p(Pid, [c]), + _ = dbg:p(Pid, [c]), ok. apps_to_mods([], Acc) -> diff --git a/deps/rabbit/test/backing_queue_SUITE.erl b/deps/rabbit/test/backing_queue_SUITE.erl index 152daf043081..2735478986b9 100644 --- a/deps/rabbit/test/backing_queue_SUITE.erl +++ b/deps/rabbit/test/backing_queue_SUITE.erl @@ -517,6 +517,7 @@ msg_store_file_scan1(Config) -> Expected = gen_result(Blocks), Path = gen_msg_file(Config, Blocks), Result = rabbit_msg_store:scan_file_for_valid_messages(Path), + ok = file:delete(Path), case Result of Expected -> ok; _ -> {expected, Expected, got, Result} diff --git a/deps/rabbit/test/cluster_limit_SUITE.erl b/deps/rabbit/test/cluster_limit_SUITE.erl index c8aa31614587..22d5c24e0d65 100644 --- a/deps/rabbit/test/cluster_limit_SUITE.erl +++ b/deps/rabbit/test/cluster_limit_SUITE.erl @@ -54,8 +54,7 @@ init_per_group(Group, Config) -> [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - rabbit_ct_helpers:run_steps(Config1b, + rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()) end. 
diff --git a/deps/rabbit/test/cluster_upgrade_SUITE.erl b/deps/rabbit/test/cluster_upgrade_SUITE.erl index 2b78f119c904..ea943f1cc0f8 100644 --- a/deps/rabbit/test/cluster_upgrade_SUITE.erl +++ b/deps/rabbit/test/cluster_upgrade_SUITE.erl @@ -55,7 +55,7 @@ init_per_testcase(Testcase, Config) -> Config1 = rabbit_ct_helpers:set_config(Config, [ {rmq_nodename_suffix, Testcase}, {rmq_nodes_count, 3}, - {force_secondary_umbrella, true} + {force_secondary, true} ]), Config2 = rabbit_ct_helpers:run_steps(Config1, rabbit_ct_broker_helpers:setup_steps() ++ @@ -139,7 +139,7 @@ upgrade_cluster(Config) -> || N <- Cluster], ct:pal(?LOW_IMPORTANCE, "Restarting cluster ~p", [Cluster]), Config1 = rabbit_ct_helpers:set_config( - Config, {force_secondary_umbrella, false}), + Config, {force_secondary, false}), [ok = rabbit_ct_broker_helpers:async_start_node(Config1, N) || N <- Cluster], [ok = rabbit_ct_broker_helpers:wait_for_async_start_node(N) diff --git a/deps/rabbit/test/deprecated_features_SUITE.erl b/deps/rabbit/test/deprecated_features_SUITE.erl index 6d8ead9d371a..3f4ea21eba8c 100644 --- a/deps/rabbit/test/deprecated_features_SUITE.erl +++ b/deps/rabbit/test/deprecated_features_SUITE.erl @@ -85,9 +85,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_setup_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_setup_steps(Config, []). end_per_suite(Config) -> Config. 
diff --git a/deps/rabbit/test/feature_flags_SUITE.erl b/deps/rabbit/test/feature_flags_SUITE.erl index 55a469209202..72df3c0469bd 100644 --- a/deps/rabbit/test/feature_flags_SUITE.erl +++ b/deps/rabbit/test/feature_flags_SUITE.erl @@ -64,6 +64,7 @@ groups() -> [ enable_feature_flag_in_a_healthy_situation, enable_unsupported_feature_flag_in_a_healthy_situation, + enable_feature_flag_when_ff_file_is_unwritable, required_feature_flag_enabled_by_default, required_plugin_feature_flag_enabled_by_default, required_plugin_feature_flag_enabled_after_activation, @@ -73,6 +74,7 @@ groups() -> [ enable_feature_flag_in_a_healthy_situation, enable_unsupported_feature_flag_in_a_healthy_situation, + enable_feature_flag_when_ff_file_is_unwritable, enable_feature_flag_with_a_network_partition, mark_feature_flag_as_enabled_with_a_network_partition, required_feature_flag_enabled_by_default, @@ -122,10 +124,9 @@ end_per_suite(Config) -> init_per_group(registry, Config) -> logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]); + rabbit_ct_helpers:run_steps(Config, []); init_per_group(feature_flags_v2, Config) -> + %% @todo Remove this entirely as that FF became required in 3.12. %% `feature_flags_v2' is now required and won't work in mixed-version %% clusters if the other version doesn't support it. case rabbit_ct_helpers:is_mixed_versions() of @@ -267,6 +268,7 @@ init_per_testcase(Testcase, Config) -> Config2 = rabbit_ct_helpers:set_config( Config1, [{rmq_nodename_suffix, Testcase}, + {secondary_enabled_plugins, "my_plugin"}, {tcp_ports_base, {skip_n_nodes, TestNumber * ClusterSize}}, {net_ticktime, 5} @@ -655,6 +657,7 @@ enable_unsupported_feature_flag_in_a_healthy_situation(Config) -> False, is_feature_flag_enabled(Config, FeatureName)). +%% This test case must run as an unprivileged user. 
enable_feature_flag_when_ff_file_is_unwritable(Config) -> Supported = rabbit_ct_broker_helpers:is_feature_flag_supported( Config, ff_from_testsuite), diff --git a/deps/rabbit/test/feature_flags_v2_SUITE.erl b/deps/rabbit/test/feature_flags_v2_SUITE.erl index 8678d7a2d877..37e881597153 100644 --- a/deps/rabbit/test/feature_flags_v2_SUITE.erl +++ b/deps/rabbit/test/feature_flags_v2_SUITE.erl @@ -114,9 +114,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_steps(Config, []). end_per_suite(Config) -> Config. @@ -169,7 +167,15 @@ start_slave_node(Parent, Config, Testcase, N) -> Name = list_to_atom( rabbit_misc:format("~ts-~b", [Testcase, N])), ct:pal("- Starting slave node `~ts@...`", [Name]), - {ok, Node} = slave:start(net_adm:localhost(), Name), + {ok, NodePid, Node} = peer:start(#{ + name => Name, + connection => standard_io, + shutdown => close + }), + peer:call(NodePid, net_kernel, set_net_ticktime, [5]), + + persistent_term:put({?MODULE, Node}, NodePid), + ct:pal("- Slave node `~ts` started", [Node]), TestCodePath = filename:dirname(code:which(?MODULE)), @@ -185,8 +191,16 @@ stop_slave_nodes(Config) -> rabbit_ct_helpers:delete_config(Config, nodes). stop_slave_node(Node) -> - ct:pal("- Stopping slave node `~ts`...", [Node]), - ok = slave:stop(Node). + case persistent_term:get({?MODULE, Node}, undefined) of + undefined -> + %% Node was already stopped (e.g. by the test case). + ok; + NodePid -> + persistent_term:erase({?MODULE, Node}), + + ct:pal("- Stopping slave node `~ts`...", [Node]), + ok = peer:stop(NodePid) + end. 
connect_nodes([FirstNode | OtherNodes] = Nodes) -> lists:foreach( diff --git a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl b/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl deleted file mode 100644 index d8b627da39d4..000000000000 --- a/deps/rabbit/test/feature_flags_with_unpriveleged_user_SUITE.erl +++ /dev/null @@ -1,72 +0,0 @@ -%% This Source Code Form is subject to the terms of the Mozilla Public -%% License, v. 2.0. If a copy of the MPL was not distributed with this -%% file, You can obtain one at https://mozilla.org/MPL/2.0/. -%% -%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. -%% - --module(feature_flags_with_unpriveleged_user_SUITE). - --include_lib("eunit/include/eunit.hrl"). - --export([suite/0, - all/0, - groups/0, - init_per_suite/1, - end_per_suite/1, - init_per_group/2, - end_per_group/2, - init_per_testcase/2, - end_per_testcase/2, - - enable_feature_flag_when_ff_file_is_unwritable/1 - ]). - -suite() -> - [{timetrap, {minutes, 5}}]. - -all() -> - [ - {group, enabling_on_single_node}, - {group, enabling_in_cluster} - ]. - -groups() -> - [ - {enabling_on_single_node, [], - [ - enable_feature_flag_when_ff_file_is_unwritable - ]}, - {enabling_in_cluster, [], - [ - enable_feature_flag_when_ff_file_is_unwritable - ]} - ]. - -%% This suite exists to allow running a portion of the feature_flags_SUITE -%% under separate conditions in ci - -init_per_suite(Config) -> - feature_flags_SUITE:init_per_suite(Config). - -end_per_suite(Config) -> - feature_flags_SUITE:end_per_suite(Config). - -init_per_group(Group, Config) -> - feature_flags_SUITE:init_per_group(Group, Config). - -end_per_group(Group, Config) -> - feature_flags_SUITE:end_per_group(Group, Config). - -init_per_testcase(Testcase, Config) -> - feature_flags_SUITE:init_per_testcase(Testcase, Config). 
- -end_per_testcase(Testcase, Config) -> - feature_flags_SUITE:end_per_testcase(Testcase, Config). - -%% ------------------------------------------------------------------- -%% Testcases. -%% ------------------------------------------------------------------- - -enable_feature_flag_when_ff_file_is_unwritable(Config) -> - feature_flags_SUITE:enable_feature_flag_when_ff_file_is_unwritable(Config). diff --git a/deps/rabbit/test/metadata_store_clustering_SUITE.erl b/deps/rabbit/test/metadata_store_clustering_SUITE.erl index b648ac0a284f..a33241d263cb 100644 --- a/deps/rabbit/test/metadata_store_clustering_SUITE.erl +++ b/deps/rabbit/test/metadata_store_clustering_SUITE.erl @@ -112,8 +112,7 @@ end_per_suite(Config) -> init_per_group(unclustered, Config) -> rabbit_ct_helpers:set_config(Config, [{metadata_store, mnesia}, {rmq_nodes_clustered, false}, - {tcp_ports_base}, - {net_ticktime, 10}]); + {tcp_ports_base}]); init_per_group(cluster_size_2, Config) -> rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, 2}]); init_per_group(cluster_size_3, Config) -> diff --git a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl index 1e18f808ceef..8862ddd3dd7a 100644 --- a/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl +++ b/deps/rabbit/test/per_vhost_connection_limit_SUITE.erl @@ -105,9 +105,8 @@ init_per_multinode_group(_Group, Config, NodeCount) -> rabbit_ct_broker_helpers:setup_steps() ++ rabbit_ct_client_helpers:setup_steps()). -end_per_group(Group, Config) when Group == tests; - Group == khepri_migration -> - % The broker is managed by {init,end}_per_testcase(). +end_per_group(Group, Config) when Group == tests -> + % The broker is managed by sub-groups. 
Config; end_per_group(_Group, Config) -> rabbit_ct_helpers:run_steps(Config, diff --git a/deps/rabbit/test/queue_parallel_SUITE.erl b/deps/rabbit/test/queue_parallel_SUITE.erl index 2b4c4735bcd6..5ee1c3232639 100644 --- a/deps/rabbit/test/queue_parallel_SUITE.erl +++ b/deps/rabbit/test/queue_parallel_SUITE.erl @@ -646,7 +646,11 @@ delete_immediately_by_resource(Config) -> ok. cc_header_non_array_should_close_channel(Config) -> - {C, Ch} = rabbit_ct_client_helpers:open_connection_and_channel(Config, 0), + %% We use an unmanaged connection to avoid issues with + %% tests running in parallel: in this test we expect the + %% channel to close, but that channel is reused in other tests. + C = rabbit_ct_client_helpers:open_unmanaged_connection(Config, 0), + {ok, Ch} = amqp_connection:open_channel(C), Name0 = ?FUNCTION_NAME, Name = atom_to_binary(Name0), QName = <<"queue_cc_header_non_array", Name/binary>>, diff --git a/deps/rabbit/test/queue_type_SUITE.erl b/deps/rabbit/test/queue_type_SUITE.erl index d89859e4703b..28352212dfb1 100644 --- a/deps/rabbit/test/queue_type_SUITE.erl +++ b/deps/rabbit/test/queue_type_SUITE.erl @@ -56,8 +56,7 @@ init_per_group(Group, Config) -> {tcp_ports_base, {skip_n_nodes, ClusterSize}} ]), Config1b = rabbit_ct_helpers:set_config(Config1, - [{queue_type, atom_to_binary(Group, utf8)}, - {net_ticktime, 5} + [{queue_type, atom_to_binary(Group, utf8)} ]), Config2 = rabbit_ct_helpers:run_steps(Config1b, [fun merge_app_env/1 ] ++ diff --git a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl index 73a51b2c5a4d..453d648a353c 100644 --- a/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl +++ b/deps/rabbit/test/quorum_queue_member_reconciliation_SUITE.erl @@ -51,8 +51,7 @@ init_per_group(Group, Config) -> [{rmq_nodes_count, ClusterSize}, {rmq_nodename_suffix, Group}, {tcp_ports_base}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - 
rabbit_ct_helpers:run_steps(Config1b, + rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()). diff --git a/deps/rabbit/test/rabbit_ct_hook.erl b/deps/rabbit/test/rabbit_ct_hook.erl new file mode 100644 index 000000000000..07097a57e0fa --- /dev/null +++ b/deps/rabbit/test/rabbit_ct_hook.erl @@ -0,0 +1,7 @@ +-module(rabbit_ct_hook). + +-export([init/2]). + +init(_, _) -> + _ = rabbit_ct_helpers:redirect_logger_to_ct_logs([]), + {ok, undefined}. diff --git a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl index 619fb4e06bdb..5d4c39958e1c 100644 --- a/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl +++ b/deps/rabbit/test/rabbit_fifo_dlx_integration_SUITE.erl @@ -95,8 +95,7 @@ init_per_group(Group, Config, NodesCount) -> Config1 = rabbit_ct_helpers:set_config(Config, [{rmq_nodes_count, NodesCount}, {rmq_nodename_suffix, Group}, - {tcp_ports_base}, - {net_ticktime, 10}]), + {tcp_ports_base}]), Config2 = rabbit_ct_helpers:run_steps(Config1, [fun merge_app_env/1 ] ++ rabbit_ct_broker_helpers:setup_steps()), diff --git a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl index 3a74b4753bd0..c8b2f8aabce9 100644 --- a/deps/rabbit/test/rabbit_stream_queue_SUITE.erl +++ b/deps/rabbit/test/rabbit_stream_queue_SUITE.erl @@ -227,15 +227,14 @@ init_per_group1(Group, Config) -> {rmq_nodename_suffix, Group}, {tcp_ports_base}, {rmq_nodes_clustered, Clustered}]), - Config1b = rabbit_ct_helpers:set_config(Config1, [{net_ticktime, 10}]), - Config1c = case Group of + Config1b = case Group of unclustered_size_3_4 -> rabbit_ct_helpers:merge_app_env( - Config1b, {rabbit, [{stream_tick_interval, 5000}]}); + Config1, {rabbit, [{stream_tick_interval, 5000}]}); _ -> - Config1b + Config1 end, - Ret = rabbit_ct_helpers:run_steps(Config1c, + Ret = rabbit_ct_helpers:run_steps(Config1b, [fun merge_app_env/1 ] ++ 
rabbit_ct_broker_helpers:setup_steps()), case Ret of diff --git a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl index f0e05e580e0d..d5f5f147782a 100644 --- a/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl +++ b/deps/rabbit/test/rabbitmq_4_0_deprecations_SUITE.erl @@ -75,9 +75,7 @@ groups() -> init_per_suite(Config) -> rabbit_ct_helpers:log_environment(), logger:set_primary_config(level, debug), - rabbit_ct_helpers:run_setup_steps( - Config, - [fun rabbit_ct_helpers:redirect_logger_to_ct_logs/1]). + rabbit_ct_helpers:run_setup_steps(Config, []). end_per_suite(Config) -> Config. diff --git a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl index 72968c0b37ac..297da7493cbf 100644 --- a/deps/rabbit/test/unit_config_value_encryption_SUITE.erl +++ b/deps/rabbit/test/unit_config_value_encryption_SUITE.erl @@ -68,7 +68,10 @@ decrypt_config(_Config) -> ok. do_decrypt_config(Algo = {C, H, I, P}) -> - ok = application:load(rabbit), + case application:load(rabbit) of + ok -> ok; + {error, {already_loaded, rabbit}} -> ok + end, RabbitConfig = application:get_all_env(rabbit), %% Encrypt a few values in configuration. %% Common cases. diff --git a/deps/rabbit_common/Makefile b/deps/rabbit_common/Makefile index f4a56200f693..857cee1ade5d 100644 --- a/deps/rabbit_common/Makefile +++ b/deps/rabbit_common/Makefile @@ -38,12 +38,10 @@ DEPS = thoas ranch recon credentials_obfuscation -include development.pre.mk -DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-test.mk +DEP_EARLY_PLUGINS = $(PROJECT)/mk/rabbitmq-early-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
DEP_PLUGINS = $(PROJECT)/mk/rabbitmq-build.mk \ - $(PROJECT)/mk/rabbitmq-hexpm.mk \ - $(PROJECT)/mk/rabbitmq-dist.mk \ - $(PROJECT)/mk/rabbitmq-test.mk \ - $(PROJECT)/mk/rabbitmq-tools.mk + $(PROJECT)/mk/rabbitmq-hexpm.mk PLT_APPS += mnesia crypto ssl @@ -54,9 +52,7 @@ HEX_TARBALL_FILES += rabbitmq-components.mk \ git-revisions.txt \ mk/rabbitmq-build.mk \ mk/rabbitmq-dist.mk \ - mk/rabbitmq-early-test.mk \ - mk/rabbitmq-hexpm.mk \ - mk/rabbitmq-test.mk \ - mk/rabbitmq-tools.mk + mk/rabbitmq-early-plugin.mk \ + mk/rabbitmq-hexpm.mk -include development.post.mk diff --git a/deps/rabbit_common/mk/rabbitmq-build.mk b/deps/rabbit_common/mk/rabbitmq-build.mk index 010045f5c37a..93d9613c17ce 100644 --- a/deps/rabbit_common/mk/rabbitmq-build.mk +++ b/deps/rabbit_common/mk/rabbitmq-build.mk @@ -8,9 +8,10 @@ TEST_ERLC_OPTS += +nowarn_export_all -ifneq ($(filter-out rabbit_common amqp_client,$(PROJECT)),) +ifneq ($(filter rabbitmq_cli,$(BUILD_DEPS) $(DEPS)),) # Add the CLI ebin directory to the code path for the compiler: plugin # CLI extensions may access behaviour modules defined in this directory. + RMQ_ERLC_OPTS += -pa $(DEPS_DIR)/rabbitmq_cli/_build/dev/lib/rabbitmqctl/ebin endif diff --git a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk index faf75872024e..e9a1ac0db080 100644 --- a/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk +++ b/deps/rabbit_common/mk/rabbitmq-components.hexpm.mk @@ -6,25 +6,30 @@ ifeq ($(.DEFAULT_GOAL),) endif # PROJECT_VERSION defaults to: -# 1. the version exported by rabbitmq-server-release; +# 1. the version exported by environment; # 2. the version stored in `git-revisions.txt`, if it exists; # 3. a version based on git-describe(1), if it is a Git clone; # 4. 0.0.0 +# +# Note that in the case where git-describe(1) is used +# (e.g. during development), running "git gc" may help +# improve the performance. 
PROJECT_VERSION := $(RABBITMQ_VERSION) ifeq ($(PROJECT_VERSION),) -PROJECT_VERSION := $(shell \ -if test -f git-revisions.txt; then \ +ifneq ($(wildcard git-revisions.txt),) +PROJECT_VERSION = $(shell \ head -n1 git-revisions.txt | \ - awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \ -else \ + awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}') +else +PROJECT_VERSION = $(shell \ (git describe --dirty --abbrev=7 --tags --always --first-parent \ - 2>/dev/null || echo rabbitmq_v0_0_0) | \ - sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \ - -e 's/-/./g'; \ -fi) + 2>/dev/null || echo 0.0.0) | \ + sed -e 's/^v//' -e 's/_/./g' -e 's/-/+/' -e 's/-/./g') endif +endif + # -------------------------------------------------------------------- # RabbitMQ components. diff --git a/deps/rabbit_common/mk/rabbitmq-dist.mk b/deps/rabbit_common/mk/rabbitmq-dist.mk index 1d0254452fec..10ee9938e849 100644 --- a/deps/rabbit_common/mk/rabbitmq-dist.mk +++ b/deps/rabbit_common/mk/rabbitmq-dist.mk @@ -1,8 +1,8 @@ .PHONY: dist test-dist do-dist cli-scripts cli-escripts clean-dist -DIST_DIR = plugins -CLI_SCRIPTS_DIR = sbin -CLI_ESCRIPTS_DIR = escript +DIST_DIR ?= $(CURDIR)/plugins +CLI_SCRIPTS_DIR ?= $(CURDIR)/sbin +CLI_ESCRIPTS_DIR ?= $(CURDIR)/escript MIX = echo y | mix # Set $(DIST_AS_EZS) to a non-empty value to enable the packaging of @@ -212,7 +212,10 @@ CLI_ESCRIPTS_LOCK = $(CLI_ESCRIPTS_DIR).lock ifeq ($(MAKELEVEL),0) ifneq ($(filter-out rabbit_common amqp10_common rabbitmq_stream_common,$(PROJECT)),) +# These do not depend on 'rabbit' as DEPS but may as TEST_DEPS. 
+ifneq ($(filter-out amqp_client amqp10_client rabbitmq_amqp_client rabbitmq_ct_helpers,$(PROJECT)),) app:: install-cli +endif test-build:: install-cli endif endif diff --git a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk index 7b5f14b8f912..1b8aaa3f422a 100644 --- a/deps/rabbit_common/mk/rabbitmq-early-plugin.mk +++ b/deps/rabbit_common/mk/rabbitmq-early-plugin.mk @@ -1,3 +1,65 @@ -ifeq ($(filter rabbitmq-early-test.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-early-test.mk +# -------------------------------------------------------------------- +# dialyzer +# -------------------------------------------------------------------- + +DIALYZER_OPTS ?= -Werror_handling -Wunmatched_returns -Wunknown + +dialyze: ERL_LIBS = $(APPS_DIR):$(DEPS_DIR):$(DEPS_DIR)/rabbitmq_cli/_build/dev/lib:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) + +# -------------------------------------------------------------------- +# Common Test flags. +# -------------------------------------------------------------------- + +ifneq ($(PROJECT),rabbitmq_server_release) +CT_LOGS_DIR = $(abspath $(CURDIR)/../../logs) +endif + +# We start the common_test node as a hidden Erlang node. The benefit +# is that other Erlang nodes won't try to connect to each other after +# discovering the common_test node if they are not meant to. +# +# This helps when several unrelated RabbitMQ clusters are started in +# parallel. + +CT_OPTS += -hidden + +# We set a low tick time to deal with distribution failures quicker. + +CT_OPTS += -kernel net_ticktime 5 + +# Enable the following common_test hooks on GH and Concourse: +# +# cth_fail_fast +# This hook will make sure the first failure puts an end to the +# testsuites; ie. all remaining tests are skipped. +# +# cth_styledout +# This hook will change the output of common_test to something more +# concise and colored. 
+ +CT_HOOKS ?= cth_styledout +TEST_DEPS += cth_styledout + +ifdef CONCOURSE +FAIL_FAST = 1 +SKIP_AS_ERROR = 1 +endif + +RMQ_CI_CT_HOOKS = cth_fail_fast +ifeq ($(FAIL_FAST),1) +CT_HOOKS += $(RMQ_CI_CT_HOOKS) +TEST_DEPS += $(RMQ_CI_CT_HOOKS) +endif + +dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master +dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master + +CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS)) +CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE)) + +# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped +# testsuite/testgroup/testcase is considered an error. + +ifeq ($(SKIP_AS_ERROR),1) +export RABBITMQ_CT_SKIP_AS_ERROR = true endif diff --git a/deps/rabbit_common/mk/rabbitmq-early-test.mk b/deps/rabbit_common/mk/rabbitmq-early-test.mk deleted file mode 100644 index 3779bd4a2fe7..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-early-test.mk +++ /dev/null @@ -1,72 +0,0 @@ -# -------------------------------------------------------------------- -# dialyzer -# -------------------------------------------------------------------- - -DIALYZER_OPTS ?= -Werror_handling -Wunmatched_returns -Wunknown - -dialyze: ERL_LIBS = $(APPS_DIR):$(DEPS_DIR):$(DEPS_DIR)/rabbitmq_cli/_build/dev/lib:$(dir $(shell elixir --eval ":io.format '~s~n', [:code.lib_dir :elixir ]")) - -# -------------------------------------------------------------------- -# %-on-concourse dependencies. -# -------------------------------------------------------------------- - -ifneq ($(words $(filter %-on-concourse,$(MAKECMDGOALS))),0) -TEST_DEPS += ci $(RMQ_CI_CT_HOOKS) -NO_AUTOPATCH += ci $(RMQ_CI_CT_HOOKS) -dep_ci = git git@github.com:rabbitmq/rabbitmq-ci.git main -endif - -# -------------------------------------------------------------------- -# Common Test flags. -# -------------------------------------------------------------------- - -# We start the common_test node as a hidden Erlang node. 
The benefit -# is that other Erlang nodes won't try to connect to each other after -# discovering the common_test node if they are not meant to. -# -# This helps when several unrelated RabbitMQ clusters are started in -# parallel. - -CT_OPTS += -hidden - -# Enable the following common_test hooks on GH and Concourse: -# -# cth_fail_fast -# This hook will make sure the first failure puts an end to the -# testsuites; ie. all remaining tests are skipped. -# -# cth_styledout -# This hook will change the output of common_test to something more -# concise and colored. -# -# On Jenkins, in addition to those common_test hooks, enable JUnit-like -# report. Jenkins parses those reports so the results can be browsed -# from its UI. Furthermore, it displays a graph showing evolution of the -# results over time. - -CT_HOOKS ?= cth_styledout -TEST_DEPS += cth_styledout - -ifdef CONCOURSE -FAIL_FAST = 1 -SKIP_AS_ERROR = 1 -endif - -RMQ_CI_CT_HOOKS = cth_fail_fast -ifeq ($(FAIL_FAST),1) -CT_HOOKS += $(RMQ_CI_CT_HOOKS) -TEST_DEPS += $(RMQ_CI_CT_HOOKS) -endif - -dep_cth_fail_fast = git https://github.com/rabbitmq/cth_fail_fast.git master -dep_cth_styledout = git https://github.com/rabbitmq/cth_styledout.git master - -CT_HOOKS_PARAM_VALUE = $(patsubst %,and %,$(CT_HOOKS)) -CT_OPTS += -ct_hooks $(wordlist 2,$(words $(CT_HOOKS_PARAM_VALUE)),$(CT_HOOKS_PARAM_VALUE)) - -# On CI, set $RABBITMQ_CT_SKIP_AS_ERROR so that any skipped -# testsuite/testgroup/testcase is considered an error. - -ifeq ($(SKIP_AS_ERROR),1) -export RABBITMQ_CT_SKIP_AS_ERROR = true -endif diff --git a/deps/rabbit_common/mk/rabbitmq-hexpm.mk b/deps/rabbit_common/mk/rabbitmq-hexpm.mk index 4f314249bdf5..c4c62fdfa865 100644 --- a/deps/rabbit_common/mk/rabbitmq-hexpm.mk +++ b/deps/rabbit_common/mk/rabbitmq-hexpm.mk @@ -1,5 +1,8 @@ # -------------------------------------------------------------------- # Hex.pm. +# +# This Erlang.mk plugin should only be included by +# applications that produce an Hex.pm release. 
# -------------------------------------------------------------------- .PHONY: hex-publish hex-publish-docs diff --git a/deps/rabbit_common/mk/rabbitmq-plugin.mk b/deps/rabbit_common/mk/rabbitmq-plugin.mk index 29064a9a4f94..fd47b8beec21 100644 --- a/deps/rabbit_common/mk/rabbitmq-plugin.mk +++ b/deps/rabbit_common/mk/rabbitmq-plugin.mk @@ -2,10 +2,6 @@ ifeq ($(filter rabbitmq-build.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-build.mk endif -ifeq ($(filter rabbitmq-hexpm.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-hexpm.mk -endif - ifeq ($(filter rabbitmq-dist.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-dist.mk endif @@ -13,11 +9,3 @@ endif ifeq ($(filter rabbitmq-run.mk,$(notdir $(MAKEFILE_LIST))),) include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-run.mk endif - -ifeq ($(filter rabbitmq-test.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-test.mk -endif - -ifeq ($(filter rabbitmq-tools.mk,$(notdir $(MAKEFILE_LIST))),) -include $(dir $(lastword $(MAKEFILE_LIST)))rabbitmq-tools.mk -endif diff --git a/deps/rabbit_common/mk/rabbitmq-run.mk b/deps/rabbit_common/mk/rabbitmq-run.mk index 8a5d2894516d..f7720de345fe 100644 --- a/deps/rabbit_common/mk/rabbitmq-run.mk +++ b/deps/rabbit_common/mk/rabbitmq-run.mk @@ -19,7 +19,7 @@ TEST_TMPDIR ?= $(TMPDIR)/rabbitmq-test-instances endif # Location of the scripts controlling the broker. -RABBITMQ_SCRIPTS_DIR ?= $(CURDIR)/sbin +RABBITMQ_SCRIPTS_DIR ?= $(CLI_SCRIPTS_DIR) ifeq ($(PLATFORM),msys2) RABBITMQ_PLUGINS ?= $(RABBITMQ_SCRIPTS_DIR)/rabbitmq-plugins.bat @@ -39,7 +39,7 @@ export RABBITMQ_SCRIPTS_DIR RABBITMQCTL RABBITMQ_PLUGINS RABBITMQ_SERVER RABBITM export MAKE # We need to pass the location of codegen to the Java client ant -# process. +# process. @todo Delete? 
CODEGEN_DIR = $(DEPS_DIR)/rabbitmq_codegen PYTHONPATH = $(CODEGEN_DIR) export PYTHONPATH @@ -90,7 +90,7 @@ ifdef PLUGINS_FROM_DEPS_DIR RMQ_PLUGINS_DIR = $(DEPS_DIR) DIST_ERL_LIBS = $(ERL_LIBS) else -RMQ_PLUGINS_DIR = $(CURDIR)/$(DIST_DIR) +RMQ_PLUGINS_DIR = $(DIST_DIR) # We do not want to add apps/ or deps/ to ERL_LIBS # when running the release from dist. The `plugins` # directory is added to ERL_LIBS by rabbitmq-env. diff --git a/deps/rabbit_common/mk/rabbitmq-test.mk b/deps/rabbit_common/mk/rabbitmq-test.mk deleted file mode 100644 index 16cf2dc8f6bc..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-test.mk +++ /dev/null @@ -1,66 +0,0 @@ -.PHONY: ct-slow ct-fast - -ct-slow ct-fast: - $(MAKE) ct CT_SUITES='$(CT_SUITES)' - -# -------------------------------------------------------------------- -# Helpers to run Make targets on Concourse. -# -------------------------------------------------------------------- - -FLY ?= fly -FLY_TARGET ?= $(shell $(FLY) targets | awk '/ci\.rabbitmq\.com/ { print $$1; }') - -CONCOURSE_TASK = $(ERLANG_MK_TMP)/concourse-task.yaml - -CI_DIR ?= $(DEPS_DIR)/ci -PIPELINE_DIR = $(CI_DIR)/server-release -BRANCH_RELEASE = $(shell "$(PIPELINE_DIR)/scripts/map-branch-to-release.sh" "$(base_rmq_ref)") -PIPELINE_DATA = $(PIPELINE_DIR)/release-data-$(BRANCH_RELEASE).yaml -REPOSITORY_NAME = $(shell "$(PIPELINE_DIR)/scripts/map-erlang-app-and-repository-name.sh" "$(PIPELINE_DATA)" "$(PROJECT)") - -CONCOURSE_PLATFORM ?= linux -ERLANG_VERSION ?= $(shell "$(PIPELINE_DIR)/scripts/list-erlang-versions.sh" "$(PIPELINE_DATA)" | head -n 1) -TASK_INPUTS = $(shell "$(PIPELINE_DIR)/scripts/list-task-inputs.sh" "$(CONCOURSE_TASK)") - -.PHONY: $(CONCOURSE_TASK) -$(CONCOURSE_TASK): $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) - $(gen_verbose) echo 'platform: $(CONCOURSE_PLATFORM)' > "$@" - $(verbose) echo 'inputs:' >> "$@" - $(verbose) echo ' - name: $(PROJECT)' >> "$@" - $(verbose) cat $(ERLANG_MK_RECURSIVE_TEST_DEPS_LIST) | while read -r file; do \ - echo " - name: 
$$(basename "$$file")" >> "$@"; \ - done - $(verbose) echo 'outputs:' >> "$@" - $(verbose) echo ' - name: test-output' >> "$@" -ifeq ($(CONCOURSE_PLATFORM),linux) - $(verbose) echo 'image_resource:' >> "$@" - $(verbose) echo ' type: docker-image' >> "$@" - $(verbose) echo ' source:' >> "$@" - $(verbose) echo ' repository: pivotalrabbitmq/rabbitmq-server-buildenv' >> "$@" - $(verbose) echo ' tag: linux-erlang-$(ERLANG_VERSION)' >> "$@" -endif - $(verbose) echo 'run:' >> "$@" - $(verbose) echo ' path: ci/server-release/scripts/test-erlang-app.sh' >> "$@" - $(verbose) echo ' args:' >> "$@" - $(verbose) echo " - $(PROJECT)" >> "$@" -# This section must be the last because the `%-on-concourse` target -# appends other variables. - $(verbose) echo 'params:' >> "$@" -ifdef V - $(verbose) echo ' V: "$(V)"' >> "$@" -endif -ifdef t - $(verbose) echo ' t: "$(t)"' >> "$@" -endif - -%-on-concourse: $(CONCOURSE_TASK) - $(verbose) test -d "$(PIPELINE_DIR)" - $(verbose) echo ' MAKE_TARGET: "$*"' >> "$(CONCOURSE_TASK)" - $(FLY) -t $(FLY_TARGET) execute \ - --config="$(CONCOURSE_TASK)" \ - $(foreach input,$(TASK_INPUTS), \ - $(if $(filter $(PROJECT),$(input)), \ - --input="$(input)=.", \ - --input="$(input)=$(DEPS_DIR)/$(input)")) \ - --output="test-output=$(CT_LOGS_DIR)/on-concourse" - $(verbose) rm -f "$(CT_LOGS_DIR)/on-concourse/filename" diff --git a/deps/rabbit_common/mk/rabbitmq-tools.mk b/deps/rabbit_common/mk/rabbitmq-tools.mk deleted file mode 100644 index 0e5ca370a8e4..000000000000 --- a/deps/rabbit_common/mk/rabbitmq-tools.mk +++ /dev/null @@ -1,300 +0,0 @@ -ifeq ($(PLATFORM),msys2) -HOSTNAME = $(COMPUTERNAME) -else -ifeq ($(PLATFORM),solaris) -HOSTNAME = $(shell hostname | sed 's@\..*@@') -else -HOSTNAME = $(shell hostname -s) -endif -endif - -READY_DEPS = $(foreach DEP,\ - $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS) $(TEST_DEPS)), \ - $(if $(wildcard $(DEPS_DIR)/$(DEP)),$(DEP),)) - -RELEASED_RMQ_DEPS = $(filter $(RABBITMQ_COMPONENTS),$(DEPS) $(BUILD_DEPS)) - 
-.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \ - show-current-git-fetch-url show-current-git-push-url - -show-upstream-git-fetch-url: - @echo $(RABBITMQ_UPSTREAM_FETCH_URL) - -show-upstream-git-push-url: - @echo $(RABBITMQ_UPSTREAM_PUSH_URL) - -show-current-git-fetch-url: - @echo $(RABBITMQ_CURRENT_FETCH_URL) - -show-current-git-push-url: - @echo $(RABBITMQ_CURRENT_PUSH_URL) - -update-contributor-code-of-conduct: - $(verbose) for repo in $(READY_DEPS:%=$(DEPS_DIR)/%); do \ - cp $(DEPS_DIR)/rabbit_common/CODE_OF_CONDUCT.md $$repo/CODE_OF_CONDUCT.md; \ - cp $(DEPS_DIR)/rabbit_common/CONTRIBUTING.md $$repo/CONTRIBUTING.md; \ - done - -ifneq ($(wildcard .git),) - -.PHONY: sync-gitremote sync-gituser - -sync-gitremote: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitremote) - @: - -%+sync-gitremote: - $(exec_verbose) cd $* && \ - git remote set-url origin \ - '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(notdir $*))' - $(verbose) cd $* && \ - git remote set-url --push origin \ - '$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(notdir $*))' - -ifeq ($(origin, RMQ_GIT_GLOBAL_USER_NAME),undefined) -RMQ_GIT_GLOBAL_USER_NAME := $(shell git config --global user.name) -export RMQ_GIT_GLOBAL_USER_NAME -endif -ifeq ($(origin RMQ_GIT_GLOBAL_USER_EMAIL),undefined) -RMQ_GIT_GLOBAL_USER_EMAIL := $(shell git config --global user.email) -export RMQ_GIT_GLOBAL_USER_EMAIL -endif -ifeq ($(origin RMQ_GIT_USER_NAME),undefined) -RMQ_GIT_USER_NAME := $(shell git config user.name) -export RMQ_GIT_USER_NAME -endif -ifeq ($(origin RMQ_GIT_USER_EMAIL),undefined) -RMQ_GIT_USER_EMAIL := $(shell git config user.email) -export RMQ_GIT_USER_EMAIL -endif - -sync-gituser: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gituser) - @: - -%+sync-gituser: -ifeq ($(RMQ_GIT_USER_NAME),$(RMQ_GIT_GLOBAL_USER_NAME)) - $(exec_verbose) cd $* && git config --unset user.name || : -else - $(exec_verbose) cd $* && git config user.name "$(RMQ_GIT_USER_NAME)" -endif -ifeq 
($(RMQ_GIT_USER_EMAIL),$(RMQ_GIT_GLOBAL_USER_EMAIL)) - $(verbose) cd $* && git config --unset user.email || : -else - $(verbose) cd $* && git config user.email "$(RMQ_GIT_USER_EMAIL)" -endif - -.PHONY: sync-gitignore-from-main -sync-gitignore-from-main: $(READY_DEPS:%=$(DEPS_DIR)/%+sync-gitignore-from-main) - -%+sync-gitignore-from-main: - $(gen_verbose) cd $* && \ - if test -d .git; then \ - branch=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}'); \ - ! test "$$branch" = 'main' || exit 0; \ - git show origin/main:.gitignore > .gitignore; \ - fi -ifeq ($(DO_COMMIT),yes) - $(verbose) cd $* && \ - if test -d .git; then \ - git diff --quiet .gitignore \ - || git commit -m 'Git: Sync .gitignore from main' .gitignore; \ - fi -endif - -.PHONY: show-branch - -show-branch: $(READY_DEPS:%=$(DEPS_DIR)/%+show-branch) - $(verbose) printf '%-34s %s\n' $(PROJECT): "$$(git symbolic-ref -q --short HEAD || git describe --tags --exact-match)" - -%+show-branch: - $(verbose) printf '%-34s %s\n' $(notdir $*): "$$(cd $* && (git symbolic-ref -q --short HEAD || git describe --tags --exact-match))" - -SINCE_TAG ?= last-release -COMMITS_LOG_OPTS ?= --oneline --decorate --no-merges -MARKDOWN ?= no - -define show_commits_since_tag -set -e; \ -if test "$1"; then \ - erlang_app=$(notdir $1); \ - repository=$(call rmq_cmp_repo_name,$(notdir $1)); \ - git_dir=-C\ "$1"; \ -else \ - erlang_app=$(PROJECT); \ - repository=$(call rmq_cmp_repo_name,$(PROJECT)); \ -fi; \ -case "$(SINCE_TAG)" in \ -last-release) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \ - ;; \ -*) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \ - ;; \ -esac; \ -if test "$$tags_count" -gt 0; then \ - case "$(SINCE_TAG)" in \ - last-release) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags \ - --exclude "*-beta*" \ 
- --exclude "*_milestone*" \ - --exclude "*[-_]rc*"); \ - ;; \ - last-prerelease) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags); \ - ;; \ - *) \ - git $$git_dir rev-parse "$(SINCE_TAG)" -- >/dev/null; \ - ref=$(SINCE_TAG); \ - ;; \ - esac; \ - commits_count=$$(git $$git_dir log --oneline "$$ref.." | wc -l); \ - if test "$$commits_count" -gt 0; then \ - if test "$(MARKDOWN)" = yes; then \ - printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\nCommits since \`$$ref\`:\n\n"; \ - git $$git_dir --no-pager log $(COMMITS_LOG_OPTS) \ - --format="format:* %s ([\`%h\`](https://github.com/rabbitmq/$$repository/commit/%H))" \ - "$$ref.."; \ - echo; \ - else \ - echo; \ - echo "# $$repository - Commits since $$ref"; \ - git $$git_dir log $(COMMITS_LOG_OPTS) "$$ref.."; \ - fi; \ - fi; \ -else \ - if test "$(MARKDOWN)" = yes; then \ - printf "\n## [\`$$repository\`](https://github.com/rabbitmq/$$repository)\n\n**New** since the last release!\n"; \ - else \ - echo; \ - echo "# $$repository - New since the last release!"; \ - fi; \ -fi -endef - -.PHONY: commits-since-release - -commits-since-release: commits-since-release-title \ - $(RELEASED_RMQ_DEPS:%=$(DEPS_DIR)/%+commits-since-release) - $(verbose) $(call show_commits_since_tag) - -commits-since-release-title: - $(verbose) set -e; \ - case "$(SINCE_TAG)" in \ - last-release) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | grep -E -v '(-beta|_milestone|[-_]rc)' | wc -l); \ - ;; \ - *) \ - tags_count=$$(git $$git_dir tag -l 2>/dev/null | wc -l); \ - ;; \ - esac; \ - if test "$$tags_count" -gt 0; then \ - case "$(SINCE_TAG)" in \ - last-release) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags \ - --exclude "*-beta*" \ - --exclude "*_milestone*" \ - --exclude "*[-_]rc*"); \ - ;; \ - last-prerelease) \ - ref=$$(git $$git_dir describe --abbrev=0 --tags); \ - ;; \ - *) \ - ref=$(SINCE_TAG); \ - ;; \ - esac; \ - version=$$(echo "$$ref" | sed -E \ - -e 
's/rabbitmq_v([0-9]+)_([0-9]+)_([0-9]+)/v\1.\2.\3/' \ - -e 's/_milestone/-beta./' \ - -e 's/_rc/-rc./' \ - -e 's/^v//'); \ - echo "# Changes since RabbitMQ $$version"; \ - else \ - echo "# Changes since the beginning of time"; \ - fi - -%+commits-since-release: - $(verbose) $(call show_commits_since_tag,$*) - -endif # ($(wildcard .git),) - -# -------------------------------------------------------------------- -# erlang.mk query-deps* formatting. -# -------------------------------------------------------------------- - -# We need to provide a repo mapping for deps resolved via git_rmq fetch method -query_repo_git_rmq = https://github.com/rabbitmq/$(call rmq_cmp_repo_name,$(1)) - -# -------------------------------------------------------------------- -# Common test logs compression. -# -------------------------------------------------------------------- - -.PHONY: ct-logs-archive clean-ct-logs-archive - -ifneq ($(wildcard logs/*),) -TAR := tar -ifeq ($(PLATFORM),freebsd) -TAR := gtar -endif -ifeq ($(PLATFORM),darwin) -TAR := gtar -endif - -CT_LOGS_ARCHIVE ?= $(PROJECT)-ct-logs-$(subst _,-,$(subst -,,$(subst .,,$(patsubst ct_run.ct_$(PROJECT)@$(HOSTNAME).%,%,$(notdir $(lastword $(wildcard logs/ct_run.*))))))).tar.xz - -ifeq ($(patsubst %.tar.xz,%,$(CT_LOGS_ARCHIVE)),$(CT_LOGS_ARCHIVE)) -$(error CT_LOGS_ARCHIVE file must use '.tar.xz' as its filename extension) -endif - -ct-logs-archive: $(CT_LOGS_ARCHIVE) - @: - -$(CT_LOGS_ARCHIVE): - $(gen_verbose) \ - for file in logs/*; do \ - ! 
test -L "$$file" || rm "$$file"; \ - done - $(verbose) \ - $(TAR) -c \ - --exclude "*/mnesia" \ - --transform "s/^logs/$(patsubst %.tar.xz,%,$(notdir $(CT_LOGS_ARCHIVE)))/" \ - -f - logs | \ - xz > "$@" -else -ct-logs-archive: - @: -endif - -clean-ct-logs-archive:: - $(gen_verbose) rm -f $(PROJECT)-ct-logs-*.tar.xz - -clean:: clean-ct-logs-archive - -# -------------------------------------------------------------------- -# Generate a file listing RabbitMQ component dependencies and their -# Git commit hash. -# -------------------------------------------------------------------- - -.PHONY: rabbitmq-deps.mk clean-rabbitmq-deps.mk - -rabbitmq-deps.mk: $(PROJECT)-rabbitmq-deps.mk - @: - -closing_paren := ) - -define rmq_deps_mk_line -dep_$(1) := git $(dir $(RABBITMQ_UPSTREAM_FETCH_URL))$(call rmq_cmp_repo_name,$(1)).git $$(git -C "$(2)" rev-parse HEAD) -endef - -$(PROJECT)-rabbitmq-deps.mk: $(ERLANG_MK_RECURSIVE_DEPS_LIST) - $(gen_verbose) echo "# In $(PROJECT) - commit $$(git rev-parse HEAD)" > $@ - $(verbose) cat $(ERLANG_MK_RECURSIVE_DEPS_LIST) | \ - while read -r dir; do \ - component=$$(basename "$$dir"); \ - case "$$component" in \ - $(foreach component,$(RABBITMQ_COMPONENTS),$(component)$(closing_paren) echo "$(call rmq_deps_mk_line,$(component),$$dir)" ;;) \ - esac; \ - done >> $@ - -clean:: clean-rabbitmq-deps.mk - -clean-rabbitmq-deps.mk: - $(gen_verbose) rm -f $(PROJECT)-rabbitmq-deps.mk diff --git a/deps/rabbit_common/src/rabbit_core_metrics.erl b/deps/rabbit_common/src/rabbit_core_metrics.erl index c06b73bc457d..8b5430076f53 100644 --- a/deps/rabbit_common/src/rabbit_core_metrics.erl +++ b/deps/rabbit_common/src/rabbit_core_metrics.erl @@ -124,8 +124,8 @@ terminate() -> connection_created(Pid, Infos) -> ets:insert(connection_created, {Pid, Infos}), - ets:update_counter(connection_churn_metrics, node(), {2, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {2, 1}, + ?CONNECTION_CHURN_METRICS), ok. 
connection_closed(Pid) -> @@ -133,8 +133,8 @@ connection_closed(Pid) -> ets:delete(connection_metrics, Pid), %% Delete marker ets:update_element(connection_coarse_metrics, Pid, {5, 1}), - ets:update_counter(connection_churn_metrics, node(), {3, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {3, 1}, + ?CONNECTION_CHURN_METRICS), ok. connection_stats(Pid, Infos) -> @@ -148,16 +148,16 @@ connection_stats(Pid, Recv_oct, Send_oct, Reductions) -> channel_created(Pid, Infos) -> ets:insert(channel_created, {Pid, Infos}), - ets:update_counter(connection_churn_metrics, node(), {4, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {4, 1}, + ?CONNECTION_CHURN_METRICS), ok. channel_closed(Pid) -> ets:delete(channel_created, Pid), ets:delete(channel_metrics, Pid), ets:delete(channel_process_metrics, Pid), - ets:update_counter(connection_churn_metrics, node(), {5, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {5, 1}, + ?CONNECTION_CHURN_METRICS), ok. channel_stats(Pid, Infos) -> @@ -276,20 +276,20 @@ queue_stats(Name, MessagesReady, MessagesUnacknowledge, Messages, Reductions) -> queue_declared(_Name) -> %% Name is not needed, but might be useful in the future. - ets:update_counter(connection_churn_metrics, node(), {6, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {6, 1}, + ?CONNECTION_CHURN_METRICS), ok. queue_created(_Name) -> %% Name is not needed, but might be useful in the future. - ets:update_counter(connection_churn_metrics, node(), {7, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {7, 1}, + ?CONNECTION_CHURN_METRICS), ok. 
queue_deleted(Name) -> ets:delete(queue_coarse_metrics, Name), - ets:update_counter(connection_churn_metrics, node(), {8, 1}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {8, 1}, + ?CONNECTION_CHURN_METRICS), %% Delete markers ets:update_element(queue_metrics, Name, {3, 1}), CQX = ets:select(channel_queue_exchange_metrics, match_spec_cqx(Name)), @@ -302,8 +302,8 @@ queue_deleted(Name) -> end, CQ). queues_deleted(Queues) -> - ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)}, - ?CONNECTION_CHURN_METRICS), + _ = ets:update_counter(connection_churn_metrics, node(), {8, length(Queues)}, + ?CONNECTION_CHURN_METRICS), [ delete_queue_metrics(Queue) || Queue <- Queues ], [ begin diff --git a/deps/rabbit_common/src/rabbit_misc.erl b/deps/rabbit_common/src/rabbit_misc.erl index c67d36adc8fe..1821abb75eca 100644 --- a/deps/rabbit_common/src/rabbit_misc.erl +++ b/deps/rabbit_common/src/rabbit_misc.erl @@ -26,9 +26,6 @@ -export([table_lookup/2, set_table_value/4, amqp_table/1, to_amqp_table/1]). -export([r/3, r/2, r_arg/4, rs/1, queue_resource/2, exchange_resource/2]). --export([enable_cover/0, report_cover/0]). --export([enable_cover/1, report_cover/1]). --export([start_cover/1]). -export([throw_on_error/2, with_exit_handler/2, is_abnormal_exit/1, filter_exit_map/2]). -export([ensure_ok/2]). @@ -165,11 +162,6 @@ {invalid_type, rabbit_framing:amqp_field_type()}) | rabbit_types:r(K) when is_subtype(K, atom()). -spec rs(rabbit_types:r(atom())) -> string(). --spec enable_cover() -> ok_or_error(). --spec start_cover([{string(), string()} | string()]) -> 'ok'. --spec report_cover() -> 'ok'. --spec enable_cover([file:filename() | atom()]) -> ok_or_error(). --spec report_cover([file:filename() | atom()]) -> 'ok'. -spec throw_on_error (atom(), thunk(rabbit_types:error(any()) | {ok, A} | A)) -> A. -spec with_exit_handler(thunk(A), thunk(A)) -> A. 
@@ -449,59 +441,6 @@ queue_resource(VHostPath, Name) -> exchange_resource(VHostPath, Name) -> r(VHostPath, exchange, Name). -enable_cover() -> enable_cover(["."]). - -enable_cover(Dirs) -> - lists:foldl(fun (Dir, ok) -> - case cover:compile_beam_directory( - filename:join(lists:concat([Dir]),"ebin")) of - {error, _} = Err -> Err; - _ -> ok - end; - (_Dir, Err) -> - Err - end, ok, Dirs). - -start_cover(NodesS) -> - {ok, _} = cover:start([rabbit_nodes_common:make(N) || N <- NodesS]), - ok. - -report_cover() -> report_cover(["."]). - -report_cover(Dirs) -> [report_cover1(lists:concat([Dir])) || Dir <- Dirs], ok. - -report_cover1(Root) -> - Dir = filename:join(Root, "cover"), - ok = filelib:ensure_dir(filename:join(Dir, "junk")), - lists:foreach(fun (F) -> file:delete(F) end, - filelib:wildcard(filename:join(Dir, "*.html"))), - {ok, SummaryFile} = file:open(filename:join(Dir, "summary.txt"), [write]), - {CT, NCT} = - lists:foldl( - fun (M,{CovTot, NotCovTot}) -> - {ok, {M, {Cov, NotCov}}} = cover:analyze(M, module), - ok = report_coverage_percentage(SummaryFile, - Cov, NotCov, M), - {ok,_} = cover:analyze_to_file( - M, - filename:join(Dir, atom_to_list(M) ++ ".html"), - [html]), - {CovTot+Cov, NotCovTot+NotCov} - end, - {0, 0}, - lists:sort(cover:modules())), - ok = report_coverage_percentage(SummaryFile, CT, NCT, 'TOTAL'), - ok = file:close(SummaryFile), - ok. - -report_coverage_percentage(File, Cov, NotCov, Mod) -> - io:fwrite(File, "~6.2f ~tp~n", - [if - Cov+NotCov > 0 -> 100.0*Cov/(Cov+NotCov); - true -> 100.0 - end, - Mod]). - %% @doc Halts the emulator returning the given status code to the os. %% On Windows this function will block indefinitely so as to give the io %% subsystem time to flush stdout completely. 
diff --git a/deps/rabbitmq_amqp1_0/Makefile b/deps/rabbitmq_amqp1_0/Makefile index 30dc3ed18824..f59aac6d7fa7 100644 --- a/deps/rabbitmq_amqp1_0/Makefile +++ b/deps/rabbitmq_amqp1_0/Makefile @@ -3,8 +3,7 @@ PROJECT_DESCRIPTION = Deprecated no-op AMQP 1.0 plugin LOCAL_DEPS = rabbit -DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk .DEFAULT_GOAL = all diff --git a/deps/rabbitmq_amqp_client/Makefile b/deps/rabbitmq_amqp_client/Makefile index 0a50069065e3..d9cabad59ba1 100644 --- a/deps/rabbitmq_amqp_client/Makefile +++ b/deps/rabbitmq_amqp_client/Makefile @@ -8,12 +8,9 @@ BUILD_DEPS = rabbit_common DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk TEST_DEPS = rabbit rabbitmq_ct_helpers -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-hexpm.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-test.mk \ - rabbit_common/mk/rabbitmq-tools.mk +# We do not depend on rabbit therefore can't run the broker; +# however we can run a test broker in the test suites. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk .DEFAULT_GOAL = all diff --git a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl index f84a19a683ea..bba6767a3ce4 100644 --- a/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl +++ b/deps/rabbitmq_auth_backend_ldap/src/rabbit_auth_backend_ldap.erl @@ -378,14 +378,8 @@ search_groups(LDAP, Desc, GroupsBase, Scope, DN) -> []; {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, [], _Referrals}} -> - []; {ok, {eldap_search_result, [], _Referrals, _Controls}}-> []; - {ok, {eldap_search_result, Entries, _Referrals}} -> - [ON || #eldap_entry{object_name = ON} <- Entries]; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> [ON || #eldap_entry{object_name = ON} <- Entries] end. @@ -470,10 +464,6 @@ object_exists(DN, Filter, LDAP) -> {scope, eldap:baseObject()}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, Entries, _Referrals}} -> - length(Entries) > 0; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> length(Entries) > 0; {error, _} = E -> @@ -487,14 +477,8 @@ attribute(DN, AttributeName, LDAP) -> {attributes, [AttributeName]}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, E = [#eldap_entry{}|_], _Referrals}} -> - get_attributes(AttributeName, E); {ok, {eldap_search_result, E = [#eldap_entry{}|_], _Referrals, _Controls}} -> get_attributes(AttributeName, E); - {ok, {eldap_search_result, _Entries, _Referrals}} -> - {error, not_found}; {ok, 
{eldap_search_result, _Entries, _Referrals, _Controls}} -> {error, not_found}; {error, _} = E -> @@ -890,18 +874,9 @@ dn_lookup(Username, LDAP) -> {attributes, ["distinguishedName"]}]) of {ok, {referral, Referrals}} -> {error, {referrals_not_supported, Referrals}}; - %% support #eldap_search_result before and after - %% https://github.com/erlang/otp/pull/5538 - {ok, {eldap_search_result, [#eldap_entry{object_name = DN}], _Referrals}}-> - ?L1("DN lookup: ~ts -> ~ts", [Username, DN]), - DN; {ok, {eldap_search_result, [#eldap_entry{object_name = DN}], _Referrals, _Controls}}-> ?L1("DN lookup: ~ts -> ~ts", [Username, DN]), DN; - {ok, {eldap_search_result, Entries, _Referrals}} -> - rabbit_log_ldap:warning("Searching for DN for ~ts, got back ~tp", - [Filled, Entries]), - Filled; {ok, {eldap_search_result, Entries, _Referrals, _Controls}} -> rabbit_log_ldap:warning("Searching for DN for ~ts, got back ~tp", [Filled, Entries]), diff --git a/deps/rabbitmq_aws/Makefile b/deps/rabbitmq_aws/Makefile index 29089276c9b1..3647e0dfd5c1 100644 --- a/deps/rabbitmq_aws/Makefile +++ b/deps/rabbitmq_aws/Makefile @@ -9,7 +9,8 @@ endef LOCAL_DEPS = crypto inets ssl xmerl public_key BUILD_DEPS = rabbit_common -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk TEST_DEPS = meck include ../../rabbitmq-components.mk diff --git a/deps/rabbitmq_cli/Makefile b/deps/rabbitmq_cli/Makefile index 6c60dbd8a98c..7c8c9f910a96 100644 --- a/deps/rabbitmq_cli/Makefile +++ b/deps/rabbitmq_cli/Makefile @@ -11,7 +11,7 @@ dep_temp = hex 0.4.7 dep_x509 = hex 0.8.8 DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk VERBOSE_TEST ?= true MAX_CASES ?= 1 @@ -113,7 +113,12 @@ rel:: $(ESCRIPTS) @: tests:: $(ESCRIPTS) - $(gen_verbose) $(MIX_TEST) $(TEST_FILE) + $(verbose) $(MAKE) -C ../../ install-cli + $(verbose) $(MAKE) -C ../../ run-background-broker PLUGINS="rabbit rabbitmq_federation rabbitmq_stomp rabbitmq_stream_management amqp_client" + $(gen_verbose) $(MIX_TEST) $(TEST_FILE); \ + RES=$$?; \ + $(MAKE) -C ../../ stop-node; \ + exit $$RES .PHONY: test diff --git a/deps/rabbitmq_cli/test/core/json_stream_test.exs b/deps/rabbitmq_cli/test/core/json_stream_test.exs index ccbe0c54b65f..0d736fb8af61 100644 --- a/deps/rabbitmq_cli/test/core/json_stream_test.exs +++ b/deps/rabbitmq_cli/test/core/json_stream_test.exs @@ -12,6 +12,8 @@ defmodule JsonStreamTest do test "format_output map with atom keys is converted to JSON object" do assert @formatter.format_output(%{a: :apple, b: :beer}, %{}) == "{\"a\":\"apple\",\"b\":\"beer\"}" + or @formatter.format_output(%{a: :apple, b: :beer}, %{}) == + "{\"b\":\"beer\",\"a\":\"apple\"}" end test "format_output map with binary keys is converted to JSON object" do diff --git a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app index 94f286b72257..8ea87019ad7d 100644 --- 
a/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app +++ b/deps/rabbitmq_cli/test/fixtures/plugins/plugins_with_version_requirements/mock_rabbitmq_plugin_for_3_8-0.1.0/ebin/mock_rabbitmq_plugin_for_3_8.app @@ -6,5 +6,5 @@ {applications, [kernel,stdlib,rabbit]}, {mod, {mock_rabbitmq_plugins_01_app, []}}, {env, []}, - {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0", "4.0.0"]} + {broker_version_requirements, ["3.9.0", "3.10.0", "3.11.0", "3.12.0", "3.13.0", "4.0.0", "4.1.0"]} ]}. diff --git a/deps/rabbitmq_codegen/Makefile b/deps/rabbitmq_codegen/Makefile index 55d72ed88a1e..a2f6c0be813f 100644 --- a/deps/rabbitmq_codegen/Makefile +++ b/deps/rabbitmq_codegen/Makefile @@ -8,35 +8,3 @@ clean: distclean: clean find . -regex '.*\(~\|#\|\.swp\)' -exec rm {} \; - -# Upstream URL for the current project. -RABBITMQ_COMPONENT_REPO_NAME := rabbitmq-codegen -RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git -RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git - -# Current URL for the current project. If this is not a Git clone, -# default to the upstream Git repository. 
-ifneq ($(wildcard .git),) -git_origin_fetch_url := $(shell git config remote.origin.url) -git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url) -RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url) -RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url) -else -RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL) -RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL) -endif - -.PHONY: show-upstream-git-fetch-url show-upstream-git-push-url \ - show-current-git-fetch-url show-current-git-push-url - -show-upstream-git-fetch-url: - @echo $(RABBITMQ_UPSTREAM_FETCH_URL) - -show-upstream-git-push-url: - @echo $(RABBITMQ_UPSTREAM_PUSH_URL) - -show-current-git-fetch-url: - @echo $(RABBITMQ_CURRENT_FETCH_URL) - -show-current-git-push-url: - @echo $(RABBITMQ_CURRENT_PUSH_URL) diff --git a/deps/rabbitmq_ct_client_helpers/Makefile b/deps/rabbitmq_ct_client_helpers/Makefile index c61e87a82a34..84b5238fb08e 100644 --- a/deps/rabbitmq_ct_client_helpers/Makefile +++ b/deps/rabbitmq_ct_client_helpers/Makefile @@ -3,8 +3,7 @@ PROJECT_DESCRIPTION = Common Test helpers for RabbitMQ (client-side helpers) DEPS = rabbit_common rabbitmq_ct_helpers amqp_client -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-tools.mk +DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk PLT_APPS = common_test diff --git a/deps/rabbitmq_ct_helpers/Makefile b/deps/rabbitmq_ct_helpers/Makefile index be609ab79070..be8cfaee95dd 100644 --- a/deps/rabbitmq_ct_helpers/Makefile +++ b/deps/rabbitmq_ct_helpers/Makefile @@ -2,7 +2,13 @@ PROJECT = rabbitmq_ct_helpers PROJECT_DESCRIPTION = Common Test helpers for RabbitMQ DEPS = rabbit_common amqp10_common rabbitmq_stream_common proper inet_tcp_proxy meck -TEST_DEPS = rabbit +LOCAL_DEPS = common_test eunit inets +#TEST_DEPS = rabbit + +# We are calling one function from 'rabbit' so we need it in the PLT. 
+# But really this should be a full dependency; or we don't use the +# function anymore; or move it to rabbit_common. @todo +dialyze: DEPS += rabbit XREF_IGNORE = [ \ {'Elixir.OptionParser',split,1}, \ @@ -10,10 +16,9 @@ XREF_IGNORE = [ \ dep_inet_tcp_proxy = git https://github.com/rabbitmq/inet_tcp_proxy master -DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk \ - rabbit_common/mk/rabbitmq-dist.mk \ - rabbit_common/mk/rabbitmq-run.mk \ - rabbit_common/mk/rabbitmq-tools.mk +# As this is a helper application we don't need other plugins; +# however we can run a test broker in the test suites. +DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_ct_helpers/app.bzl b/deps/rabbitmq_ct_helpers/app.bzl index dfb1163d4435..5cc19256f268 100644 --- a/deps/rabbitmq_ct_helpers/app.bzl +++ b/deps/rabbitmq_ct_helpers/app.bzl @@ -11,6 +11,9 @@ def all_beam_files(name = "all_beam_files"): name = "other_beam", testonly = True, srcs = [ + "src/ct_master_event_fork.erl", + "src/ct_master_fork.erl", + "src/ct_master_logs_fork.erl", "src/cth_log_redirect_any_domains.erl", "src/rabbit_control_helper.erl", "src/rabbit_ct_broker_helpers.erl", @@ -38,6 +41,9 @@ def all_test_beam_files(name = "all_test_beam_files"): name = "test_other_beam", testonly = True, srcs = [ + "src/ct_master_event_fork.erl", + "src/ct_master_fork.erl", + "src/ct_master_logs_fork.erl", "src/cth_log_redirect_any_domains.erl", "src/rabbit_control_helper.erl", "src/rabbit_ct_broker_helpers.erl", @@ -101,6 +107,9 @@ def all_srcs(name = "all_srcs"): name = "srcs", testonly = True, srcs = [ + "src/ct_master_event_fork.erl", + "src/ct_master_fork.erl", + "src/ct_master_logs_fork.erl", "src/cth_log_redirect_any_domains.erl", "src/rabbit_control_helper.erl", "src/rabbit_ct_broker_helpers.erl", diff --git a/deps/rabbitmq_ct_helpers/src/ct_master_event_fork.erl b/deps/rabbitmq_ct_helpers/src/ct_master_event_fork.erl new file mode 100644 index 
000000000000..2ac634840849 --- /dev/null +++ b/deps/rabbitmq_ct_helpers/src/ct_master_event_fork.erl @@ -0,0 +1,217 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%%% Common Test Framework Event Handler +%%% +%%% This module implements an event handler that the CT Master +%%% uses to handle status and progress notifications sent to the +%%% master node during test runs. It also keeps track of the +%%% details of failures which are used by the CT Master to print +%%% a summary at the end of its run. This module may be used as a +%%% template for other event handlers that can be plugged in to +%%% handle logging and reporting on the master node. +-module(ct_master_event_fork). +-moduledoc false. + +-behaviour(gen_event). + +%% API +-export([start_link/0, add_handler/0, add_handler/1, stop/0]). +-export([notify/1, sync_notify/1, get_results/0]). + +%% gen_event callbacks +-export([init/1, handle_event/2, handle_call/2, + handle_info/2, terminate/2, code_change/3]). + +-include_lib("common_test/include/ct_event.hrl"). +-include_lib("common_test/src/ct_util.hrl"). + + +-record(state, {auto_skipped=[], failed=[]}). 
+ +%%==================================================================== +%% gen_event callbacks +%%==================================================================== +%%-------------------------------------------------------------------- +%% Function: start_link() -> {ok,Pid} | {error,Error} +%% Description: Creates an event manager. +%%-------------------------------------------------------------------- +start_link() -> + gen_event:start_link({local,?CT_MEVMGR}). + +%%-------------------------------------------------------------------- +%% Function: add_handler() -> ok | {'EXIT',Reason} | term() +%% Description: Adds an event handler +%%-------------------------------------------------------------------- +add_handler() -> + gen_event:add_handler(?CT_MEVMGR_REF,?MODULE,[]). +add_handler(Args) -> + gen_event:add_handler(?CT_MEVMGR_REF,?MODULE,Args). + +%%-------------------------------------------------------------------- +%% Function: stop() -> ok +%% Description: Stops the event manager +%%-------------------------------------------------------------------- +stop() -> + case flush() of + {error,Reason} -> + ct_master_logs_fork:log("Error", + "No response from CT Master Event.\n" + "Reason = ~tp\n" + "Terminating now!\n",[Reason]), + %% communication with event manager fails, kill it + catch exit(whereis(?CT_MEVMGR_REF), kill); + _ -> + gen_event:stop(?CT_MEVMGR_REF) + end. + +flush() -> + try gen_event:call(?CT_MEVMGR_REF,?MODULE,flush,1800000) of + flushing -> + timer:sleep(1), + flush(); + done -> + ok; + Error = {error,_} -> + Error + catch + _:Reason -> + {error,Reason} + end. + +%%-------------------------------------------------------------------- +%% Function: notify(Event) -> ok +%% Description: Asynchronous notification to event manager. +%%-------------------------------------------------------------------- +notify(Event) -> + gen_event:notify(?CT_MEVMGR_REF,Event). 
+
+%%--------------------------------------------------------------------
+%% Function: sync_notify(Event) -> ok
+%% Description: Synchronous notification to event manager.
+%%--------------------------------------------------------------------
+sync_notify(Event) ->
+    gen_event:sync_notify(?CT_MEVMGR_REF,Event).
+
+%%--------------------------------------------------------------------
+%% Function: get_results() -> Results
+%% Description: Get the results for auto-skipped and failed test cases.
+%%--------------------------------------------------------------------
+get_results() ->
+    gen_event:call(?CT_MEVMGR_REF,?MODULE,get_results).
+
+%%====================================================================
+%% gen_event callbacks
+%%====================================================================
+%%--------------------------------------------------------------------
+%% Function: init(Args) -> {ok, State}
+%% Description: Whenever a new event handler is added to an event manager,
+%% this function is called to initialize the event handler.
+%%--------------------------------------------------------------------
+init(_) ->
+    ct_util:mark_process(),
+    ct_master_logs_fork:log("CT Master Event Handler started","",[]),
+    {ok,#state{}}.
+
+%%--------------------------------------------------------------------
+%% Function:
+%% handle_event(Event, State) -> {ok, State} |
+%%                               {swap_handler, Args1, State1, Mod2, Args2} |
+%%                               remove_handler
+%% Description: Whenever an event manager receives an event sent using
+%% gen_event:notify/2 or gen_event:sync_notify/2, this function is called for
+%% each installed event handler to handle the event.
+%%-------------------------------------------------------------------- +handle_event(#event{name=start_logging,node=Node,data=RunDir},State) -> + ct_master_logs_fork:log("CT Master Event Handler","Got ~ts from ~w",[RunDir,Node]), + ct_master_logs_fork:nodedir(Node,RunDir), + {ok,State}; + +handle_event(Event=#event{name=Name,node=Node,data=Data},State) -> + print("~n=== ~w ===~n", [?MODULE]), + print("~tw on ~w: ~tp~n", [Name,Node,Data]), + {ok,maybe_store_event(Event,State)}. + +%%-------------------------------------------------------------------- +%% Function: +%% handle_call(Request, State) -> {ok, Reply, State} | +%% {swap_handler, Reply, Args1, State1, +%% Mod2, Args2} | +%% {remove_handler, Reply} +%% Description: Whenever an event manager receives a request sent using +%% gen_event:call/3,4, this function is called for the specified event +%% handler to handle the request. +%%-------------------------------------------------------------------- +handle_call(get_results,State=#state{auto_skipped=AutoSkipped,failed=Failed}) -> + {ok,#{ + auto_skipped => lists:sort(AutoSkipped), + failed => lists:sort(Failed) + },State}; +handle_call(flush,State) -> + case process_info(self(),message_queue_len) of + {message_queue_len,0} -> + {ok,done,State}; + _ -> + {ok,flushing,State} + end. + +%%-------------------------------------------------------------------- +%% Function: +%% handle_info(Info, State) -> {ok, State} | +%% {swap_handler, Args1, State1, Mod2, Args2} | +%% remove_handler +%% Description: This function is called for each installed event handler when +%% an event manager receives any other message than an event or a synchronous +%% request (or a system message). +%%-------------------------------------------------------------------- +handle_info(_Info,State) -> + {ok,State}. 
+ +%%-------------------------------------------------------------------- +%% Function: terminate(Reason, State) -> ok +%% Description:Whenever an event handler is deleted from an event manager, +%% this function is called. It should be the opposite of Module:init/1 and +%% do any necessary cleaning up. +%%-------------------------------------------------------------------- +terminate(_Reason,_State) -> + ct_master_logs_fork:log("CT Master Event Handler stopping","",[]), + ok. + +%%-------------------------------------------------------------------- +%% Function: code_change(OldVsn, State, Extra) -> {ok, NewState} +%% Description: Convert process state when code is changed +%%-------------------------------------------------------------------- +code_change(_OldVsn,State,_Extra) -> + {ok,State}. + +%%-------------------------------------------------------------------- +%%% Internal functions +%%-------------------------------------------------------------------- + +print(_Str,_Args) -> +% io:format(_Str,_Args), + ok. + +maybe_store_event(#event{name=tc_done,node=Node,data={Suite,FuncOrGroup,{auto_skipped,Reason}}},State=#state{auto_skipped=Acc}) -> + State#state{auto_skipped=[{Node,Suite,FuncOrGroup,Reason}|Acc]}; +maybe_store_event(#event{name=tc_done,node=Node,data={Suite,FuncOrGroup,{failed,Reason}}},State=#state{failed=Acc}) -> + State#state{failed=[{Node,Suite,FuncOrGroup,Reason}|Acc]}; +maybe_store_event(_Event,State) -> + State. diff --git a/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl b/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl new file mode 100644 index 000000000000..a698ca9e1613 --- /dev/null +++ b/deps/rabbitmq_ct_helpers/src/ct_master_fork.erl @@ -0,0 +1,1003 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. 
+%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +-module(ct_master_fork). +%-moduledoc """ +%Distributed test execution control for `Common Test`. +% +%This module exports functions for running `Common Test` nodes on multiple hosts +%in parallel. +%""". + +-export([run/1,run/3,run/4]). +-export([run_on_node/2,run_on_node/3]). +-export([run_test/1,run_test/2]). +-export([get_event_mgr_ref/0]). +-export([basic_html/1,esc_chars/1]). + +-export([abort/0,abort/1,progress/0]). + +-export([init_master/7, init_node_ctrl/3]). + +-export([status/2]). + +-include_lib("common_test/include/ct_event.hrl"). +-include_lib("common_test/src/ct_util.hrl"). + +%-doc "Filename of test spec to be executed.". +-type test_spec() :: file:name_all(). + +-record(state, {node_ctrl_pids=[], + logdirs=[], + results=[], + locks=[], + blocked=[] + }). + +-export_type([test_spec/0]). + +%-doc """ +%Tests are spawned on `Node` using `ct:run_test/1` +%""". 
+-spec run_test(Node, Opts) -> 'ok' + when Node :: node(), + Opts :: [OptTuples], + OptTuples :: {'dir', TestDirs} + | {'suite', Suites} + | {'group', Groups} + | {'testcase', Cases} + | {'spec', TestSpecs} + | {'join_specs', boolean()} + | {'label', Label} + | {'config', CfgFiles} + | {'userconfig', UserConfig} + | {'allow_user_terms', boolean()} + | {'logdir', LogDir} + | {'silent_connections', Conns} + | {'stylesheet', CSSFile} + | {'cover', CoverSpecFile} + | {'cover_stop', boolean()} + | {'step', StepOpts} + | {'event_handler', EventHandlers} + | {'include', InclDirs} + | {'auto_compile', boolean()} + | {'abort_if_missing_suites', boolean()} + | {'create_priv_dir', CreatePrivDir} + | {'multiply_timetraps', M} + | {'scale_timetraps', boolean()} + | {'repeat', N} + | {'duration', DurTime} + | {'until', StopTime} + | {'force_stop', ForceStop} + | {'decrypt', DecryptKeyOrFile} + | {'refresh_logs', LogDir} + | {'logopts', LogOpts} + | {'verbosity', VLevels} + | {'basic_html', boolean()} + | {'esc_chars', boolean()} + | {'keep_logs',KeepSpec} + | {'ct_hooks', CTHs} + | {'ct_hooks_order', CTHsOrder} + | {'enable_builtin_hooks', boolean()} + | {'release_shell', boolean()}, + TestDirs :: [string()] | string(), + Suites :: [string()] | [atom()] | string() | atom(), + Cases :: [atom()] | atom(), + Groups :: GroupNameOrPath | [GroupNameOrPath], + GroupNameOrPath :: [atom()] | atom() | 'all', + TestSpecs :: [string()] | string(), + Label :: string() | atom(), + CfgFiles :: [string()] | string(), + UserConfig :: [{CallbackMod, CfgStrings}] | {CallbackMod, CfgStrings}, + CallbackMod :: atom(), + CfgStrings :: [string()] | string(), + LogDir :: string(), + Conns :: 'all' | [atom()], + CSSFile :: string(), + CoverSpecFile :: string(), + StepOpts :: [StepOpt], + StepOpt :: 'config' | 'keep_inactive', + EventHandlers :: EH | [EH], + EH :: atom() | {atom(), InitArgs} | {[atom()], InitArgs}, + InitArgs :: [term()], + InclDirs :: [string()] | string(), + CreatePrivDir :: 
'auto_per_run' | 'auto_per_tc' | 'manual_per_tc', + M :: integer(), + N :: integer(), + DurTime :: HHMMSS, + HHMMSS :: string(), + StopTime :: YYMoMoDDHHMMSS | HHMMSS, + YYMoMoDDHHMMSS :: string(), + ForceStop :: 'skip_rest' | boolean(), + DecryptKeyOrFile :: {'key', DecryptKey} | {'file', DecryptFile}, + DecryptKey :: string(), + DecryptFile :: string(), + LogOpts :: [LogOpt], + LogOpt :: 'no_nl' | 'no_src', + VLevels :: VLevel | [{Category, VLevel}], + VLevel :: integer(), + Category :: atom(), + KeepSpec :: 'all' | pos_integer(), + CTHs :: [CTHModule | {CTHModule, CTHInitArgs}], + CTHsOrder :: atom(), + CTHModule :: atom(), + CTHInitArgs :: term(). +run_test(Node,Opts) -> + run_test([{Node,Opts}]). + +%-doc false. +run_test({Node,Opts}) -> + run_test([{Node,Opts}]); +run_test(NodeOptsList) when is_list(NodeOptsList) -> + start_master(NodeOptsList). + +%-doc """ +%Tests are spawned on the nodes as specified in `TestSpecs`. Each specification +%in `TestSpec` is handled separately. However, it is also possible to specify a +%list of specifications to be merged into one specification before the tests are +%executed. Any test without a particular node specification is also executed on +%the nodes in `InclNodes`. Nodes in the `ExclNodes` list are excluded from the +%test. +%""". +-spec run(TestSpecs, AllowUserTerms, InclNodes, ExclNodes) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + AllowUserTerms :: boolean(), + InclNodes :: [node()], + ExclNodes :: [node()], + Specs :: [file:filename_all()], + Reason :: term(). 
+run([TS|TestSpecs],AllowUserTerms,InclNodes,ExclNodes) when is_list(TS), + is_list(InclNodes), + is_list(ExclNodes) -> + %% Note: [Spec] means run one test with Spec + %% [Spec1,Spec2] means run two tests separately + %% [[Spec1,Spec2]] means run one test, with the two specs merged + case catch ct_testspec:collect_tests_from_file([TS],InclNodes, + AllowUserTerms) of + {error,Reason} -> + [{error,Reason} | run(TestSpecs,AllowUserTerms,InclNodes,ExclNodes)]; + Tests -> + RunResult = + lists:map( + fun({Specs,TSRec=#testspec{}}) -> + RunSkipPerNode = + ct_testspec:prepare_tests(TSRec), + RunSkipPerNode2 = + exclude_nodes(ExclNodes,RunSkipPerNode), + TSList = if is_integer(hd(TS)) -> [TS]; + true -> TS end, + {Specs,run_all(RunSkipPerNode2,TSRec,[],[],TSList)} + end, Tests), + RunResult ++ run(TestSpecs,AllowUserTerms,InclNodes,ExclNodes) + end; +run([],_,_,_) -> + []; +run(TS,AllowUserTerms,InclNodes,ExclNodes) when is_list(InclNodes), + is_list(ExclNodes) -> + run([TS],AllowUserTerms,InclNodes,ExclNodes). + +%-doc(#{equiv => run(TestSpecs, false, InclNodes, ExclNodes)}). +-spec run(TestSpecs, InclNodes, ExclNodes) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + InclNodes :: [node()], + ExclNodes :: [node()], + Specs :: [file:filename_all()], + Reason :: term(). +run(TestSpecs,InclNodes,ExclNodes) -> + run(TestSpecs,false,InclNodes,ExclNodes). + +%-doc """ +%Run tests on spawned nodes as specified in `TestSpecs` (see `run/4`). +% +%Equivalent to [`run(TestSpecs, false, [], [])`](`run/4`) if +%called with TestSpecs being list of strings; +% +%Equivalent to [`run([TS], false, [], [])`](`run/4`) if +%called with TS being string. +%""". +-spec run(TestSpecs) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + Specs :: [file:filename_all()], + Reason :: term(). 
+run(TestSpecs=[TS|_]) when is_list(TS) -> + run(TestSpecs,false,[],[]); +run(TS) -> + run([TS],false,[],[]). + + +exclude_nodes([ExclNode|ExNs],RunSkipPerNode) -> + exclude_nodes(ExNs,lists:keydelete(ExclNode,1,RunSkipPerNode)); +exclude_nodes([],RunSkipPerNode) -> + RunSkipPerNode. + + +%-doc """ +%Tests are spawned on `Node` according to `TestSpecs`. +%""". +-spec run_on_node(TestSpecs, AllowUserTerms, Node) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + AllowUserTerms :: boolean(), + Node :: node(), + Specs :: [file:filename_all()], + Reason :: term(). +run_on_node([TS|TestSpecs],AllowUserTerms,Node) when is_list(TS),is_atom(Node) -> + case catch ct_testspec:collect_tests_from_file([TS],[Node], + AllowUserTerms) of + {error,Reason} -> + [{error,Reason} | run_on_node(TestSpecs,AllowUserTerms,Node)]; + Tests -> + RunResult = + lists:map( + fun({Specs,TSRec=#testspec{}}) -> + {Run,Skip} = ct_testspec:prepare_tests(TSRec,Node), + TSList = if is_integer(hd(TS)) -> [TS]; + true -> TS end, + {Specs,run_all([{Node,Run,Skip}],TSRec,[],[],TSList)} + end, Tests), + RunResult ++ run_on_node(TestSpecs,AllowUserTerms,Node) + end; +run_on_node([],_,_) -> + []; +run_on_node(TS,AllowUserTerms,Node) when is_atom(Node) -> + run_on_node([TS],AllowUserTerms,Node). + +%-doc(#{equiv => run_on_node(TestSpecs, false, Node)}). +-spec run_on_node(TestSpecs, Node) -> [{Specs, 'ok'} | {'error', Reason}] + when TestSpecs :: TestSpec | [TestSpec] | [[TestSpec]], + TestSpec :: test_spec(), + Node :: node(), + Specs :: [file:filename_all()], + Reason :: term(). +run_on_node(TestSpecs,Node) -> + run_on_node(TestSpecs,false,Node). 
+ + + +run_all([{Node,Run,Skip}|Rest],TSRec=#testspec{label = Labels, +% profile = Profiles, + logdir = LogDirs, + logopts = LogOptsList, + basic_html = BHs, + esc_chars = EscChs, + stylesheet = SSs, + verbosity = VLvls, + silent_connections = SilentConnsList, + cover = CoverFs, + cover_stop = CoverStops, + config = Cfgs, + userconfig = UsrCfgs, + event_handler = EvHs, + ct_hooks = CTHooks, + %% Not available in OTP-26. We don't use it so leave commented for now. +% ct_hooks_order = CTHooksOrder0, + enable_builtin_hooks = EnableBuiltinHooks0, + auto_compile = ACs, + abort_if_missing_suites = AiMSs, + include = Incl, + multiply_timetraps = MTs, + scale_timetraps = STs, + create_priv_dir = PDs}, + NodeOpts,LogDirsRun,Specs) -> + %% We mirror ct_run:get_data_for_node to retrieve data from #testspec, + %% but set the default values where appropriate. + Label = proplists:get_value(Node, Labels), +% Profile = proplists:get_value(Node, Profiles), + LogDir = case proplists:get_value(Node, LogDirs) of + undefined -> "."; + Dir -> Dir + end, + LogOpts = case proplists:get_value(Node, LogOptsList) of + undefined -> []; + LOs -> LOs + end, + BasicHtml = proplists:get_value(Node, BHs, false), + EscChars = proplists:get_value(Node, EscChs, true), + Stylesheet = proplists:get_value(Node, SSs), + Verbosity = case proplists:get_value(Node, VLvls) of + undefined -> []; + Lvls -> Lvls + end, + SilentConns = case proplists:get_value(Node, SilentConnsList) of + undefined -> []; + SCs -> SCs + end, + Cover = proplists:get_value(Node, CoverFs), + CoverStop = proplists:get_value(Node, CoverStops, true), + MT = proplists:get_value(Node, MTs, 1), + ST = proplists:get_value(Node, STs, false), + CreatePrivDir = proplists:get_value(Node, PDs, auto_per_run), + %% For these two values we can't exactly mirror get_data_for_node. 
+ ConfigFiles = + lists:foldr(fun({N,F},Fs) when N == Node -> [F|Fs]; + ({_N,_F},Fs) -> Fs; + (F,Fs) -> [F|Fs] + end,[],Cfgs), + UsrConfigFiles = + lists:foldr(fun({N,F},Fs) when N == Node -> [{userconfig, F}|Fs]; + ({_N,_F},Fs) -> Fs; + (F,Fs) -> [{userconfig, F}|Fs] + end,[],UsrCfgs), + EvHandlers = [{H,A} || {N,H,A} <- EvHs, N==Node], + FiltCTHooks = [Hook || {N,Hook} <- CTHooks, N==Node], +% CTHooksOrder = case CTHooksOrder0 of +% undefined -> test; +% _ -> CTHooksOrder0 +% end, + EnableBuiltinHooks = case EnableBuiltinHooks0 of + undefined -> true; + _ -> EnableBuiltinHooks0 + end, + AutoCompile = proplists:get_value(Node, ACs, true), + AbortIfMissing = proplists:get_value(Node, AiMSs, false), + Include = [I || {N,I} <- Incl, N==Node], + %% We then build the ct:run_test/1 options list. + RunTestOpts0 = + [{label, Label} || Label =/= undefined] ++ + [{stylesheet, Stylesheet} || Stylesheet =/= undefined] ++ + [{cover, Cover} || Cover =/= undefined] ++ + UsrConfigFiles, + RunTestOpts = [ +% {profile, Profile}, + {logdir, LogDir}, + {logopts, LogOpts}, + {basic_html, BasicHtml}, + {esc_chars, EscChars}, + {verbosity, Verbosity}, + {silent_connections, SilentConns}, + {cover_stop, CoverStop}, + {config, ConfigFiles}, + {event_handler, EvHandlers}, + {ct_hooks, FiltCTHooks}, +% {ct_hooks_order, CTHooksOrder}, + {enable_builtin_hooks, EnableBuiltinHooks}, + {auto_compile, AutoCompile}, + {abort_if_missing_suites, AbortIfMissing}, + {include, Include}, + {multiply_timetraps, MT}, + {scale_timetraps, ST}, + {create_priv_dir, CreatePrivDir} + |RunTestOpts0], + NO = {Node,[{prepared_tests,{Run,Skip},Specs}|RunTestOpts]}, + run_all(Rest,TSRec,[NO|NodeOpts],[LogDir|LogDirsRun],Specs); +run_all([],#testspec{ + logdir=AllLogDirs, + init=InitOptions, + event_handler=AllEvHs}, + NodeOpts,LogDirsRun,Specs) -> + Handlers = [{H,A} || {Master,H,A} <- AllEvHs, Master == master], + MasterLogDir = case lists:keysearch(master,1,AllLogDirs) of + {value,{_,Dir}} -> Dir; + false -> "." 
+ end, + log(tty,"Master Logdir","~ts",[MasterLogDir]), + start_master(lists:reverse(NodeOpts),Handlers,MasterLogDir, + LogDirsRun,InitOptions,Specs). + + +%-doc """ +%Stops all running tests. +%""". +-spec abort() -> 'ok'. +abort() -> + call(abort). + +%-doc """ +%Stops tests on specified nodes. +%""". +-spec abort(Nodes) -> 'ok' + when Nodes :: Node | [Node], + Node :: node(). +abort(Nodes) when is_list(Nodes) -> + call({abort,Nodes}); + +abort(Node) when is_atom(Node) -> + abort([Node]). + +%-doc """ +%Returns test progress. If `Status` is `ongoing`, tests are running on the node +%and are not yet finished. +%""". +-spec progress() -> [{Node, Status}] + when Node :: node(), + Status :: atom(). +progress() -> + call(progress). + +%-doc """ +%Gets a reference to the `Common Test` master event manager. The reference can be +%used to, for example, add a user-specific event handler while tests are running. +% +%_Example:_ +% +%```erlang +%gen_event:add_handler(ct_master:get_event_mgr_ref(), my_ev_h, []) +%``` +%""". +%-doc(#{since => <<"OTP 17.5">>}). +-spec get_event_mgr_ref() -> atom(). +get_event_mgr_ref() -> + ?CT_MEVMGR_REF. + +%-doc """ +%If set to `true`, the `ct_master logs` are written on a primitive HTML format, +%not using the `Common Test` CSS style sheet. +%""". +%-doc(#{since => <<"OTP R15B01">>}). +-spec basic_html(Bool) -> 'ok' + when Bool :: boolean(). +basic_html(Bool) -> + application:set_env(common_test_master, basic_html, Bool), + ok. + +%-doc false. +esc_chars(Bool) -> + application:set_env(common_test_master, esc_chars, Bool), + ok. + +%%%----------------------------------------------------------------- +%%% MASTER, runs on central controlling node. +%%%----------------------------------------------------------------- +start_master(NodeOptsList) -> + start_master(NodeOptsList,[],".",[],[],[]). 
+ +start_master(NodeOptsList,EvHandlers,MasterLogDir,LogDirs,InitOptions,Specs) -> + Master = spawn_link(?MODULE,init_master,[self(),NodeOptsList,EvHandlers, + MasterLogDir,LogDirs, + InitOptions,Specs]), + receive + {Master,Result} -> Result + end. + +%-doc false. +init_master(Parent,NodeOptsList,EvHandlers,MasterLogDir,LogDirs, + InitOptions,Specs) -> + case whereis(ct_master) of + undefined -> + register(ct_master,self()), + ct_util:mark_process(), + ok; + _Pid -> + io:format("~nWarning: ct_master already running!~n"), + exit(aborted) +% case io:get_line('[y/n]>') of +% "y\n" -> +% ok; +% "n\n" -> +% exit(aborted); +% _ -> +% init_master(NodeOptsList,LogDirs) +% end + end, + + %% start master logger + {MLPid,_} = ct_master_logs_fork:start(MasterLogDir, + [N || {N,_} <- NodeOptsList]), + log(all,"Master Logger process started","~w",[MLPid]), + + case Specs of + [] -> ok; + _ -> + SpecsStr = lists:map(fun(Name) -> + Name ++ " " + end,Specs), + ct_master_logs_fork:log("Test Specification file(s)","~ts", + [lists:flatten(SpecsStr)]) + end, + + %% start master event manager and add default handler + {ok, _} = start_ct_master_event(), + ct_master_event_fork:add_handler(), + %% add user handlers for master event manager + Add = fun({H,Args}) -> + log(all,"Adding Event Handler","~w",[H]), + case gen_event:add_handler(?CT_MEVMGR_REF,H,Args) of + ok -> ok; + {'EXIT',Why} -> exit(Why); + Other -> exit({event_handler,Other}) + end + end, + lists:foreach(Add,EvHandlers), + + %% double check event manager is started and registered + case whereis(?CT_MEVMGR) of + undefined -> + exit({?CT_MEVMGR,undefined}); + Pid when is_pid(Pid) -> + ok + end, + init_master1(Parent,NodeOptsList,InitOptions,LogDirs). + +start_ct_master_event() -> + case ct_master_event_fork:start_link() of + {error, {already_started, Pid}} -> + {ok, Pid}; + Else -> + Else + end. 
+ +init_master1(Parent,NodeOptsList,InitOptions,LogDirs) -> + {Inaccessible,NodeOptsList1,InitOptions1} = init_nodes(NodeOptsList, + InitOptions), + case Inaccessible of + [] -> + init_master2(Parent,NodeOptsList,LogDirs); + _ -> + io:format("~nThe following nodes are inaccessible: ~p~n~n", + [Inaccessible]), + io:format("Proceed(p), Rescan(r) or Abort(a)? "), + case io:get_line('[p/r/a]>') of + "p\n" -> + log(html,"Inaccessible Nodes", + "Proceeding without: ~p",[Inaccessible]), + init_master2(Parent,NodeOptsList1,LogDirs); + "r\n" -> + init_master1(Parent,NodeOptsList,InitOptions1,LogDirs); + _ -> + log(html,"Aborting Tests","",[]), + ct_master_event_fork:stop(), + ct_master_logs_fork:stop(), + exit(aborted) + end + end. + +init_master2(Parent,NodeOptsList,LogDirs) -> + process_flag(trap_exit,true), + Cookie = erlang:get_cookie(), + log(all,"Cookie","~tw",[Cookie]), + log(all,"Starting Tests", + "Tests starting on: ~p",[[N || {N,_} <- NodeOptsList]]), + SpawnAndMon = + fun({Node,Opts}) -> + monitor_node(Node,true), + log(all,"Test Info","Starting test(s) on ~w...",[Node]), + {spawn_link(Node,?MODULE,init_node_ctrl,[self(),Cookie,Opts]), + Node} + end, + NodeCtrlPids = lists:map(SpawnAndMon,NodeOptsList), + Result = master_loop(#state{node_ctrl_pids=NodeCtrlPids, + logdirs=LogDirs}), + Parent ! {self(),Result}. + +master_loop(#state{node_ctrl_pids=[], + results=Finished0}) -> + Finished = lists:sort(Finished0), + Str = + lists:map(fun({Node,Result}) -> + io_lib:format("~-40.40.*ts~tp\n", + [$_,atom_to_list(Node),Result]) + end,Finished), + log(all,"TEST RESULTS","~ts", [Str]), + log(all,"Info","Updating log files",[]), + + %% Print the failed and auto skipped tests. 
+ master_print_summary(), + + ct_master_event_fork:stop(), + ct_master_logs_fork:stop(), + {ok, Finished}; + +master_loop(State=#state{node_ctrl_pids=NodeCtrlPids, + results=Results, + locks=Locks, + blocked=Blocked}) -> + receive + {'EXIT',Pid,Reason} -> + case get_node(Pid,NodeCtrlPids) of + {Node,NodeCtrlPids1} -> + monitor_node(Node,false), + case Reason of + normal -> + log(all,"Test Info", + "Test(s) on node ~w finished.",[Node]), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1}); + Bad -> + Error = + case Bad of + What when What=/=killed,is_atom(What) -> + {error,Bad}; + _ -> + Bad + end, + log(all,"Test Info", + "Test on node ~w failed! Reason: ~tp", + [Node,Error]), + {Locks1,Blocked1} = + update_queue(exit,Node,Locks,Blocked), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1, + results=[{Node, + Error}|Results], + locks=Locks1, + blocked=Blocked1}) + end; + undefined -> + %% ignore (but report) exit from master_logger etc + log(all,"Test Info", + "Warning! Process ~w has terminated. 
Reason: ~tp", + [Pid,Reason]), + master_loop(State) + end; + + {nodedown,Node} -> + case get_pid(Node,NodeCtrlPids) of + {_Pid,NodeCtrlPids1} -> + monitor_node(Node,false), + log(all,"Test Info","No connection to testnode ~w!",[Node]), + {Locks1,Blocked1} = + update_queue(exit,Node,Locks,Blocked), + master_loop(State#state{node_ctrl_pids=NodeCtrlPids1, + results=[{Node,nodedown}|Results], + locks=Locks1, + blocked=Blocked1}); + undefined -> + master_loop(State) + end; + + {Pid,{result,Result}} -> + {Node,_} = get_node(Pid,NodeCtrlPids), + master_loop(State#state{results=[{Node,Result}|Results]}); + + {call,progress,From} -> + reply(master_progress(NodeCtrlPids,Results),From), + master_loop(State); + + {call,abort,From} -> + lists:foreach(fun({Pid,Node}) -> + log(all,"Test Info", + "Aborting tests on ~w",[Node]), + exit(Pid,kill) + end,NodeCtrlPids), + reply(ok,From), + master_loop(State); + + {call,{abort,Nodes},From} -> + lists:foreach(fun(Node) -> + case lists:keysearch(Node,2,NodeCtrlPids) of + {value,{Pid,Node}} -> + log(all,"Test Info", + "Aborting tests on ~w",[Node]), + exit(Pid,kill); + false -> + ok + end + end,Nodes), + reply(ok,From), + master_loop(State); + + {call,#event{name=Name,node=Node,data=Data},From} -> + {Op,Lock} = + case Name of + start_make -> + {take,{make,Data}}; + finished_make -> + {release,{make,Data}}; + start_write_file -> + {take,{write_file,Data}}; + finished_write_file -> + {release,{write_file,Data}} + end, + {Locks1,Blocked1} = + update_queue(Op,Node,From,Lock,Locks,Blocked), + if Op == release -> reply(ok,From); + true -> ok + end, + master_loop(State#state{locks=Locks1, + blocked=Blocked1}); + + {cast,Event} when is_record(Event,event) -> + ct_master_event_fork:notify(Event), + master_loop(State) + + end. 
+ +master_print_summary() -> + #{ + auto_skipped := AutoSkipped, + failed := Failed + } = ct_master_event_fork:get_results(), + master_print_summary_for("Auto skipped test cases", AutoSkipped), + master_print_summary_for("Failed test cases", Failed), + ok. + +master_print_summary_for(Title,List) -> + _ = case List of + [] -> ok; + _ -> + Chars = [ + io_lib:format("Node: ~w~nCase: ~w:~w~nReason: ~p~n~n", + [Node, Suite, FuncOrGroup, Reason]) + || {Node, Suite, FuncOrGroup, Reason} <- List], + log(all,Title,Chars,[]) + end, + ok. + +update_queue(take,Node,From,Lock={Op,Resource},Locks,Blocked) -> + %% Locks: [{{Operation,Resource},Node},...] + %% Blocked: [{{Operation,Resource},Node,WaitingPid},...] + case lists:keysearch(Lock,1,Locks) of + {value,{_Lock,Owner}} -> % other node has lock + log(html,"Lock Info","Node ~w blocked on ~w by ~w. Resource: ~tp", + [Node,Op,Owner,Resource]), + Blocked1 = Blocked ++ [{Lock,Node,From}], + {Locks,Blocked1}; + false -> % go ahead + Locks1 = [{Lock,Node}|Locks], + reply(ok,From), + {Locks1,Blocked} + end; + +update_queue(release,Node,_From,Lock={Op,Resource},Locks,Blocked) -> + Locks1 = lists:delete({Lock,Node},Locks), + case lists:keysearch(Lock,1,Blocked) of + {value,E={Lock,SomeNode,WaitingPid}} -> + Blocked1 = lists:delete(E,Blocked), + log(html,"Lock Info","Node ~w proceeds with ~w. Resource: ~tp", + [SomeNode,Op,Resource]), + reply(ok,WaitingPid), % waiting process may start + {Locks1,Blocked1}; + false -> + {Locks1,Blocked} + end. + +update_queue(exit,Node,Locks,Blocked) -> + NodeLocks = lists:foldl(fun({L,N},Ls) when N == Node -> + [L|Ls]; + (_,Ls) -> + Ls + end,[],Locks), + release_locks(Node,NodeLocks,Locks,Blocked). + +release_locks(Node,[Lock|Ls],Locks,Blocked) -> + {Locks1,Blocked1} = update_queue(release,Node,undefined,Lock,Locks,Blocked), + release_locks(Node,Ls,Locks1,Blocked1); +release_locks(_,[],Locks,Blocked) -> + {Locks,Blocked}. 
+ +get_node(Pid,NodeCtrlPids) -> + case lists:keysearch(Pid,1,NodeCtrlPids) of + {value,{Pid,Node}} -> + {Node,lists:keydelete(Pid,1,NodeCtrlPids)}; + false -> + undefined + end. + +get_pid(Node,NodeCtrlPids) -> + case lists:keysearch(Node,2,NodeCtrlPids) of + {value,{Pid,Node}} -> + {Pid,lists:keydelete(Node,2,NodeCtrlPids)}; + false -> + undefined + end. + +ping_nodes(NodeOptions)-> + ping_nodes(NodeOptions, [], []). + +ping_nodes([NO={Node,_Opts}|NOs],Inaccessible,NodeOpts) -> + case net_adm:ping(Node) of + pong -> + ping_nodes(NOs,Inaccessible,[NO|NodeOpts]); + _ -> + ping_nodes(NOs,[Node|Inaccessible],NodeOpts) + end; +ping_nodes([],Inaccessible,NodeOpts) -> + {lists:reverse(Inaccessible),lists:reverse(NodeOpts)}. + +master_progress(NodeCtrlPids,Results) -> + Results ++ lists:map(fun({_Pid,Node}) -> + {Node,ongoing} + end,NodeCtrlPids). + +%%%----------------------------------------------------------------- +%%% NODE CONTROLLER, runs and controls tests on a test node. +%%%----------------------------------------------------------------- +%-doc false. 
+init_node_ctrl(MasterPid,Cookie,Opts) -> + %% make sure tests proceed even if connection to master is lost + process_flag(trap_exit, true), + ct_util:mark_process(), + MasterNode = node(MasterPid), + group_leader(whereis(user),self()), + io:format("~n********** node_ctrl process ~w started on ~w **********~n", + [self(),node()]), + %% initially this node must have the same cookie as the master node + %% but now we set it explicitly for the connection so that test suites + %% can change the cookie for the node if they wish + case erlang:get_cookie() of + Cookie -> % first time or cookie not changed + erlang:set_cookie(node(MasterPid),Cookie); + _ -> + ok + end, + case whereis(ct_util_server) of + undefined -> ok; + Pid -> exit(Pid,kill) + end, + + %% start a local event manager + {ok, _} = start_ct_event(), + ct_event:add_handler([{master,MasterPid}]), + + %% log("Running test with options: ~tp~n", [Opts]), + Result = case (catch ct:run_test(Opts)) of + ok -> finished_ok; + Other -> Other + end, + + %% stop local event manager + ct_event:stop(), + + case net_adm:ping(MasterNode) of + pong -> + MasterPid ! {self(),{result,Result}}; + pang -> + io:format("Warning! Connection to master node ~w is lost. " + "Can't report result!~n~n", [MasterNode]) + end. + +start_ct_event() -> + case ct_event:start_link() of + {error, {already_started, Pid}} -> + {ok, Pid}; + Else -> + Else + end. + +%%%----------------------------------------------------------------- +%%% Event handling +%%%----------------------------------------------------------------- +%-doc false. +status(MasterPid,Event=#event{name=start_make}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=finished_make}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=start_write_file}) -> + call(MasterPid,Event); +status(MasterPid,Event=#event{name=finished_write_file}) -> + call(MasterPid,Event); +status(MasterPid,Event) -> + cast(MasterPid,Event). 
+ +%%%----------------------------------------------------------------- +%%% Internal +%%%----------------------------------------------------------------- + +log(To,Heading,Str,Args) -> + if To == all ; To == tty -> + Chars = ["=== ",Heading," ===\n", + io_lib:format(Str,Args),"\n"], + io:put_chars(Chars); + true -> + ok + end, + if To == all ; To == html -> + ct_master_logs_fork:log(Heading,Str,Args); + true -> + ok + end. + + +call(Msg) -> + call(whereis(ct_master),Msg). + +call(undefined,_Msg) -> + {error,not_running}; + +call(Pid,Msg) -> + Ref = erlang:monitor(process,Pid), + Pid ! {call,Msg,self()}, + Return = receive + {Pid,Result} -> + Result; + {'DOWN', Ref, _, _, _} -> + {error,master_died} + end, + erlang:demonitor(Ref, [flush]), + Return. + +reply(Result,To) -> + To ! {self(),Result}, + ok. + +init_nodes(NodeOptions, InitOptions)-> + _ = ping_nodes(NodeOptions), + start_nodes(InitOptions), + eval_on_nodes(InitOptions), + {Inaccessible, NodeOptions1}=ping_nodes(NodeOptions), + InitOptions1 = filter_accessible(InitOptions, Inaccessible), + {Inaccessible, NodeOptions1, InitOptions1}. + +% only nodes which are inaccessible now, should be initiated later +filter_accessible(InitOptions, Inaccessible)-> + [{Node,Option}||{Node,Option}<-InitOptions, lists:member(Node, Inaccessible)]. 
+ +start_nodes(InitOptions)-> + lists:foreach(fun({NodeName, Options})-> + [NodeS,HostS]=string:lexemes(atom_to_list(NodeName), "@"), + Node=list_to_atom(NodeS), + Host=list_to_atom(HostS), + HasNodeStart = lists:keymember(node_start, 1, Options), + IsAlive = lists:member(NodeName, nodes()), + case {HasNodeStart, IsAlive} of + {false, false}-> + io:format("WARNING: Node ~w is not alive but has no " + "node_start option~n", [NodeName]); + {false, true}-> + io:format("Node ~w is alive~n", [NodeName]); + {true, false}-> + {node_start, NodeStart} = lists:keyfind(node_start, 1, Options), + {value, {callback_module, Callback}, NodeStart2}= + lists:keytake(callback_module, 1, NodeStart), + case Callback:start(Host, Node, NodeStart2) of + {ok, NodeName} -> + io:format("Node ~w started successfully " + "with callback ~w~n", [NodeName,Callback]); + {error, Reason, _NodeName} -> + io:format("Failed to start node ~w with callback ~w! " + "Reason: ~tp~n", [NodeName, Callback, Reason]) + end; + {true, true}-> + io:format("WARNING: Node ~w is alive but has node_start " + "option~n", [NodeName]) + end + end, + InitOptions). + +eval_on_nodes(InitOptions)-> + lists:foreach(fun({NodeName, Options})-> + HasEval = lists:keymember(eval, 1, Options), + IsAlive = lists:member(NodeName, nodes()), + case {HasEval, IsAlive} of + {false,_}-> + ok; + {true,false}-> + io:format("WARNING: Node ~w is not alive but has eval " + "option~n", [NodeName]); + {true,true}-> + {eval, MFAs} = lists:keyfind(eval, 1, Options), + evaluate(NodeName, MFAs) + end + end, + InitOptions). + +evaluate(Node, [{M,F,A}|MFAs])-> + case rpc:call(Node, M, F, A) of + {badrpc,Reason}-> + io:format("WARNING: Failed to call ~w:~tw/~w on node ~w " + "due to ~tp~n", [M,F,length(A),Node,Reason]); + Result-> + io:format("Called ~w:~tw/~w on node ~w, result: ~tp~n", + [M,F,length(A),Node,Result]) + end, + evaluate(Node, MFAs); +evaluate(_Node, [])-> + ok. + +%cast(Msg) -> +% cast(whereis(ct_master),Msg). 
+ +cast(undefined,_Msg) -> + {error,not_running}; + +cast(Pid,Msg) -> + Pid ! {cast,Msg}, + ok. diff --git a/deps/rabbitmq_ct_helpers/src/ct_master_logs_fork.erl b/deps/rabbitmq_ct_helpers/src/ct_master_logs_fork.erl new file mode 100644 index 000000000000..9541c941708b --- /dev/null +++ b/deps/rabbitmq_ct_helpers/src/ct_master_logs_fork.erl @@ -0,0 +1,569 @@ +%% +%% %CopyrightBegin% +%% +%% Copyright Ericsson AB 2006-2024. All Rights Reserved. +%% +%% Licensed under the Apache License, Version 2.0 (the "License"); +%% you may not use this file except in compliance with the License. +%% You may obtain a copy of the License at +%% +%% http://www.apache.org/licenses/LICENSE-2.0 +%% +%% Unless required by applicable law or agreed to in writing, software +%% distributed under the License is distributed on an "AS IS" BASIS, +%% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +%% See the License for the specific language governing permissions and +%% limitations under the License. +%% +%% %CopyrightEnd% +%% + +%%% Logging functionality for Common Test Master. +%%% +%%% This module implements a logger for the master +%%% node. +-module(ct_master_logs_fork). +-moduledoc false. + +-export([start/2, make_all_runs_index/0, log/3, nodedir/2, + stop/0]). + +-include_lib("common_test/src/ct_util.hrl"). + +-record(state, {log_fd, start_time, logdir, rundir, + nodedir_ix_fd, nodes, nodedirs=[]}). + +-define(ct_master_log_name, "ct_master_log.html"). +-define(all_runs_name, "master_runs.html"). +-define(nodedir_index_name, "index.html"). +-define(details_file_name,"details.info"). +-define(table_color,"lightblue"). + +-define(now, os:timestamp()). 
+ +%%%-------------------------------------------------------------------- +%%% API +%%%-------------------------------------------------------------------- + +start(LogDir,Nodes) -> + Self = self(), + Pid = spawn_link(fun() -> init(Self,LogDir,Nodes) end), + MRef = erlang:monitor(process,Pid), + receive + {started,Pid,Result} -> + erlang:demonitor(MRef, [flush]), + {Pid,Result}; + {'DOWN',MRef,process,_,Reason} -> + exit({could_not_start_process,?MODULE,Reason}) + end. + +log(Heading,Format,Args) -> + cast({log,self(),[{int_header(),[log_timestamp(?now),Heading]}, + {Format,Args}, + {int_footer(),[]}]}), + ok. + +make_all_runs_index() -> + call(make_all_runs_index). + +nodedir(Node,RunDir) -> + call({nodedir,Node,RunDir}). + +stop() -> + case whereis(?MODULE) of + Pid when is_pid(Pid) -> + MRef = erlang:monitor(process,Pid), + ?MODULE ! stop, + receive + {'DOWN',MRef,process,_,_} -> + ok + end; + undefined -> + ok + end, + ok. + +%%%-------------------------------------------------------------------- +%%% Logger process +%%%-------------------------------------------------------------------- + +init(Parent,LogDir,Nodes) -> + register(?MODULE,self()), + ct_util:mark_process(), + Time = calendar:local_time(), + RunDir = make_dirname(Time), + RunDirAbs = filename:join(LogDir,RunDir), + ok = make_dir(RunDirAbs), + _ = write_details_file(RunDirAbs,{node(),Nodes}), + + case basic_html() of + true -> + put(basic_html, true); + BasicHtml -> + put(basic_html, BasicHtml), + %% copy priv files to log dir (both top dir and test run + %% dir) so logs are independent of Common Test installation + CTPath = code:lib_dir(common_test), + PrivFiles = [?css_default,?jquery_script,?tablesorter_script], + PrivFilesSrc = [filename:join(filename:join(CTPath, "priv"), F) || + F <- PrivFiles], + PrivFilesDestTop = [filename:join(LogDir, F) || F <- PrivFiles], + PrivFilesDestRun = [filename:join(RunDirAbs, F) || F <- PrivFiles], + case copy_priv_files(PrivFilesSrc, PrivFilesDestTop) of + 
{error,Src1,Dest1,Reason1} -> + io:format(user, "ERROR! "++ + "Priv file ~tp could not be copied to ~tp. "++ + "Reason: ~tp~n", + [Src1,Dest1,Reason1]), + exit({priv_file_error,Dest1}); + ok -> + case copy_priv_files(PrivFilesSrc, PrivFilesDestRun) of + {error,Src2,Dest2,Reason2} -> + io:format(user, "ERROR! "++ + "Priv file ~tp could not be copied to ~tp. "++ + "Reason: ~tp~n", + [Src2,Dest2,Reason2]), + exit({priv_file_error,Dest2}); + ok -> + ok + end + end + end, + + {ok,Cwd} = file:get_cwd(), + ok = file:set_cwd(LogDir), + _ = make_all_runs_index(LogDir), + CtLogFd = open_ct_master_log(RunDirAbs), + ok = file:set_cwd(Cwd), + + NodeStr = + lists:flatten(lists:map(fun(N) -> + atom_to_list(N) ++ " " + end,Nodes)), + + io:format(CtLogFd,int_header(),[log_timestamp(?now),"Test Nodes\n"]), + io:format(CtLogFd,"~ts\n",[NodeStr]), + io:put_chars(CtLogFd,[int_footer(),"\n"]), + + NodeDirIxFd = open_nodedir_index(RunDirAbs,Time), + Parent ! {started,self(),{Time,RunDirAbs}}, + loop(#state{log_fd=CtLogFd, + start_time=Time, + logdir=LogDir, + rundir=RunDirAbs, + nodedir_ix_fd=NodeDirIxFd, + nodes=Nodes, + nodedirs=lists:map(fun(N) -> + {N,""} + end,Nodes)}). + +copy_priv_files([SrcF | SrcFs], [DestF | DestFs]) -> + case file:copy(SrcF, DestF) of + {error,Reason} -> + {error,SrcF,DestF,Reason}; + _ -> + copy_priv_files(SrcFs, DestFs) + end; +copy_priv_files([], []) -> + ok. + +loop(State) -> + receive + {log,_From,List} -> + Fd = State#state.log_fd, + Fun = + fun({Str,Args}) -> + case catch io:format(Fd,Str++"\n",Args) of + {'EXIT',Reason} -> + io:format(Fd, + "Logging fails! 
Str: ~tp, Args: ~tp~n", + [Str,Args]), + exit({logging_failed,Reason}), + ok; + _ -> + ok + end + end, + lists:foreach(Fun,List), + loop(State); + {make_all_runs_index,From} -> + {ok,Cwd} = file:get_cwd(), + ok = file:set_cwd(State#state.logdir), + _ = make_all_runs_index(State#state.logdir), + ok = file:set_cwd(Cwd), + return(From,State#state.logdir), + loop(State); + {{nodedir,Node,RunDir},From} -> + print_nodedir(Node,RunDir,State#state.nodedir_ix_fd), + return(From,ok), + loop(State); + stop -> + {ok,Cwd} = file:get_cwd(), + ok = file:set_cwd(State#state.logdir), + _ = make_all_runs_index(State#state.logdir), + ok = file:set_cwd(Cwd), + io:format(State#state.log_fd, + int_header()++int_footer(), + [log_timestamp(?now),"Finished!"]), + _ = close_ct_master_log(State#state.log_fd), + _ = close_nodedir_index(State#state.nodedir_ix_fd), + ok + end. + + +%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% Master Log functions %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%% +open_ct_master_log(Dir) -> + FullName = filename:join(Dir,?ct_master_log_name), + {ok,Fd} = file:open(FullName,[write,{encoding,utf8}]), + io:put_chars(Fd,header("Common Test Master Log", {[],[1,2],[]})), + %% maybe add config info here later + io:put_chars(Fd,config_table([])), + io:put_chars(Fd, + "\n"), + io:put_chars(Fd, + xhtml("

Progress Log

\n
\n",
+		       "

Progress Log

\n
\n")),
+    Fd.
+
+close_ct_master_log(Fd) ->
+    io:put_chars(Fd,["
",footer()]), + file:close(Fd). + +config_table(Vars) -> + [config_table_header()|config_table1(Vars)]. + +config_table_header() -> + ["

Configuration

\n", + xhtml(["\n", + "\n"]), + "\n", + xhtml("", "\n\n")]. + +config_table1([]) -> + ["\n
KeyValue
\n"]. + +int_header() -> + "
\n
*** CT MASTER ~s *** ~ts".
+int_footer() ->
+    "
\n
".
+
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+%%% NodeDir Index functions %%%
+%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+open_nodedir_index(Dir,StartTime) ->
+    FullName = filename:join(Dir,?nodedir_index_name),
+    {ok,Fd} = file:open(FullName,[write,{encoding,utf8}]),
+    io:put_chars(Fd,nodedir_index_header(StartTime)),
+    Fd.
+
+print_nodedir(Node,RunDir,Fd) ->
+    Index = filename:join(RunDir,"index.html"),
+    io:put_chars(Fd,
+		 ["\n"
+		  "",atom_to_list(Node),"\n",
+		  "",Index,
+		  "\n",
+		  "\n"]),
+    ok.
+
+close_nodedir_index(Fd) ->
+    io:put_chars(Fd,index_footer()),
+    file:close(Fd).
+
+nodedir_index_header(StartTime) ->
+    [header("Log Files " ++ format_time(StartTime), {[],[1,2],[]}) |
+     ["
\n", + "

Common Test Master Log

", + xhtml(["\n"], + ["
\n", + "\n\n"]), + "\n", + "\n", + xhtml("", "\n\n\n")]]. + +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +%%% All Run Index functions %%% +%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% +make_all_runs_index(LogDir) -> + FullName = filename:join(LogDir,?all_runs_name), + Match = filename:join(LogDir,logdir_prefix()++"*.*"), + Dirs = filelib:wildcard(Match), + DirsSorted = (catch sort_all_runs(Dirs)), + Header = all_runs_header(), + Index = [runentry(Dir) || Dir <- DirsSorted], + Result = file:write_file(FullName, + unicode:characters_to_binary( + Header++Index++index_footer())), + Result. + +sort_all_runs(Dirs) -> + %% sort on time string, always last and on the format: + %% "YYYY-MM-DD_HH.MM.SS" + KeyList = + lists:map(fun(Dir) -> + case lists:reverse(string:lexemes(Dir,[$.,$_])) of + [SS,MM,HH,Date|_] -> + {{Date,HH,MM,SS},Dir}; + _Other -> + throw(Dirs) + end + end,Dirs), + lists:reverse(lists:map(fun({_,Dir}) -> + Dir + end,lists:keysort(1,KeyList))). + +runentry(Dir) -> + {MasterStr,NodesStr} = + case read_details_file(Dir) of + {Master,Nodes} when is_list(Nodes) -> + [_,Host] = string:lexemes(atom_to_list(Master),"@"), + {Host,lists:concat(lists:join(", ",Nodes))}; + _Error -> + {"unknown",""} + end, + Index = filename:join(Dir,?nodedir_index_name), + ["\n" + "\n", + "\n", + "\n", + "\n"]. + +all_runs_header() -> + [header("Master Test Runs", {[1],[2,3],[]}) | + ["
\n", + xhtml(["
NodeLog
", + timestamp(Dir),"",MasterStr,"",NodesStr,"
\n"], + ["
\n", + "\n\n"]), + "\n" + "\n" + "\n", + xhtml("", "\n\n")]]. + +timestamp(Dir) -> + [S,Min,H,D,M,Y|_] = lists:reverse(string:lexemes(Dir,".-_")), + [S1,Min1,H1,D1,M1,Y1] = [list_to_integer(N) || N <- [S,Min,H,D,M,Y]], + format_time({{Y1,M1,D1},{H1,Min1,S1}}). + +write_details_file(Dir,Details) -> + FullName = filename:join(Dir,?details_file_name), + force_write_file(FullName,term_to_binary(Details)). + +read_details_file(Dir) -> + FullName = filename:join(Dir,?details_file_name), + case file:read_file(FullName) of + {ok,Bin} -> + binary_to_term(Bin); + Error -> + Error + end. + +%%%-------------------------------------------------------------------- +%%% Internal functions +%%%-------------------------------------------------------------------- + +header(Title, TableCols) -> + CSSFile = xhtml(fun() -> "" end, + fun() -> make_relative(locate_priv_file(?css_default)) end), + JQueryFile = + xhtml(fun() -> "" end, + fun() -> make_relative(locate_priv_file(?jquery_script)) end), + TableSorterFile = + xhtml(fun() -> "" end, + fun() -> make_relative(locate_priv_file(?tablesorter_script)) end), + + [xhtml(["\n", + "\n"], + ["\n", + "\n"]), + "\n", + "\n", + "" ++ Title ++ "\n", + "\n", + "\n", + xhtml("", + ["\n"]), + xhtml("", + ["\n"]), + xhtml("", + ["\n"]), + xhtml(fun() -> "" end, + fun() -> ct_logs:insert_javascript({tablesorter, + ?sortable_table_name, + TableCols}) end), + "\n", + body_tag(), + "
\n", + "

" ++ Title ++ "

\n", + "
\n"]. + +index_footer() -> + ["\n
HistoryMaster HostTest Nodes
\n" + "
\n" | footer()]. + +footer() -> + ["
\n", + xhtml("

\n", "
\n"), + xhtml("

\n", "

"), + "Copyright © ", year(), + " Open Telecom Platform", + xhtml("
\n", "
\n"), + "Updated: ", current_time(), "", + xhtml("
\n", "
\n"), + xhtml("

\n", "
\n"), + "
\n" + "\n"]. + +body_tag() -> + xhtml("\n", + "\n"). + +current_time() -> + format_time(calendar:local_time()). + +format_time({{Y, Mon, D}, {H, Min, S}}) -> + Weekday = weekday(calendar:day_of_the_week(Y, Mon, D)), + lists:flatten(io_lib:format("~s ~s ~2.2.0w ~w ~2.2.0w:~2.2.0w:~2.2.0w", + [Weekday, month(Mon), D, Y, H, Min, S])). + +weekday(1) -> "Mon"; +weekday(2) -> "Tue"; +weekday(3) -> "Wed"; +weekday(4) -> "Thu"; +weekday(5) -> "Fri"; +weekday(6) -> "Sat"; +weekday(7) -> "Sun". + +month(1) -> "Jan"; +month(2) -> "Feb"; +month(3) -> "Mar"; +month(4) -> "Apr"; +month(5) -> "May"; +month(6) -> "Jun"; +month(7) -> "Jul"; +month(8) -> "Aug"; +month(9) -> "Sep"; +month(10) -> "Oct"; +month(11) -> "Nov"; +month(12) -> "Dec". + +year() -> + {Y, _, _} = date(), + integer_to_list(Y). + + +make_dirname({{YY,MM,DD},{H,M,S}}) -> + io_lib:format(logdir_prefix()++".~w-~2.2.0w-~2.2.0w_~2.2.0w.~2.2.0w.~2.2.0w", + [YY,MM,DD,H,M,S]). + +logdir_prefix() -> + "ct_master_run". + +log_timestamp(Now) -> + put(log_timestamp,Now), + {_,{H,M,S}} = calendar:now_to_local_time(Now), + lists:flatten(io_lib:format("~2.2.0w:~2.2.0w:~2.2.0w", + [H,M,S])). + +basic_html() -> + case application:get_env(common_test_master, basic_html) of + {ok,true} -> + true; + _ -> + false + end. + +xhtml(HTML, XHTML) -> + ct_logs:xhtml(HTML, XHTML). + +locate_priv_file(File) -> + ct_logs:locate_priv_file(File). + +make_relative(Dir) -> + ct_logs:make_relative(Dir). + +force_write_file(Name,Contents) -> + _ = force_delete(Name), + file:write_file(Name,Contents). + +force_delete(Name) -> + case file:delete(Name) of + {error,eacces} -> + force_rename(Name,Name++".old.",0); + Other -> + Other + end. + +force_rename(From,To,Number) -> + Dest = [To|integer_to_list(Number)], + case file:read_file_info(Dest) of + {ok,_} -> + force_rename(From,To,Number+1); + {error,_} -> + file:rename(From,Dest) + end. 
+ +call(Msg) -> + case whereis(?MODULE) of + undefined -> + {error,does_not_exist}; + Pid -> + MRef = erlang:monitor(process,Pid), + Ref = make_ref(), + ?MODULE ! {Msg,{self(),Ref}}, + receive + {Ref, Result} -> + erlang:demonitor(MRef, [flush]), + Result; + {'DOWN',MRef,process,_,Reason} -> + {error,{process_down,?MODULE,Reason}} + end + end. + +return({To,Ref},Result) -> + To ! {Ref, Result}, + ok. + +cast(Msg) -> + case whereis(?MODULE) of + undefined -> + io:format("Warning: ct_master_logs not started~n"), + {_,_,Content} = Msg, + FormatArgs = get_format_args(Content), + _ = [io:format(Format, Args) || {Format, Args} <- FormatArgs], + ok; + _Pid -> + ?MODULE ! Msg, + ok + end. + +get_format_args(Content) -> + lists:map(fun(C) -> + case C of + {_, FA, _} -> FA; + _ -> C + end + end, Content). + +make_dir(Dir) -> + case file:make_dir(Dir) of + {error, eexist} -> + ok; + Else -> + Else + end. diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl index de51925db73a..31a80a159040 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_control_helper.erl @@ -40,7 +40,8 @@ wait_for_async_command(Node) -> command_with_output(Command, Node, Args, Opts) -> Formatted = format_command(Command, Node, Args, Opts), - CommandResult = 'Elixir.RabbitMQCtl':exec_command( + Mod = 'Elixir.RabbitMQCtl', %% To silence a Dialyzer warning. + CommandResult = Mod:exec_command( Formatted, fun(Output,_,_) -> Output end), ct:pal("Executed command ~tp against node ~tp~nResult: ~tp~n", [Formatted, Node, CommandResult]), CommandResult. @@ -50,7 +51,8 @@ format_command(Command, Node, Args, Opts) -> [Command, format_args(Args), format_options([{"--node", Node} | Opts])]), - 'Elixir.OptionParser':split(iolist_to_binary(Formatted)). + Mod = 'Elixir.OptionParser', %% To silence a Dialyzer warning. + Mod:split(iolist_to_binary(Formatted)). 
format_args(Args) -> iolist_to_binary([ io_lib:format("~tp ", [Arg]) || Arg <- Args ]). diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl index 8c96d3910400..9428811153e4 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_broker_helpers.erl @@ -276,7 +276,7 @@ run_make_dist(Config) -> end; _ -> global:del_lock(LockId, [node()]), - ct:pal(?LOW_IMPORTANCE, "(skip `$MAKE test-dist`)", []), + ct:log(?LOW_IMPORTANCE, "(skip `$MAKE test-dist`)", []), Config end. @@ -394,7 +394,7 @@ wait_for_rabbitmq_nodes(Config, Starting, NodeConfigs, Clustered) -> NodeConfigs1 = [NC || {_, NC} <- NodeConfigs], Config1 = rabbit_ct_helpers:set_config(Config, {rmq_nodes, NodeConfigs1}), - stop_rabbitmq_nodes(Config1), + _ = stop_rabbitmq_nodes(Config1), Error; {Pid, I, NodeConfig} when NodeConfigs =:= [] -> wait_for_rabbitmq_nodes(Config, Starting -- [Pid], @@ -434,6 +434,7 @@ start_rabbitmq_node(Master, Config, NodeConfig, I) -> %% It's unlikely we'll ever succeed to start RabbitMQ. Master ! {self(), Error}, unlink(Master); + %% @todo This might not work right now in at least some cases... {skip, _} -> %% Try again with another TCP port numbers base. NodeConfig4 = move_nonworking_nodedir_away(NodeConfig3), @@ -490,11 +491,15 @@ init_tcp_port_numbers(Config, NodeConfig, I) -> update_tcp_ports_in_rmq_config(NodeConfig2, ?TCP_PORTS_LIST). tcp_port_base_for_broker(Config, I, PortsCount) -> + tcp_port_base_for_broker0(Config, I, PortsCount). 
+ +tcp_port_base_for_broker0(Config, I, PortsCount) -> + Base0 = persistent_term:get(rabbit_ct_tcp_port_base, ?TCP_PORTS_BASE), Base = case rabbit_ct_helpers:get_config(Config, tcp_ports_base) of undefined -> - ?TCP_PORTS_BASE; + Base0; {skip_n_nodes, N} -> - tcp_port_base_for_broker1(?TCP_PORTS_BASE, N, PortsCount); + tcp_port_base_for_broker1(Base0, N, PortsCount); B -> B end, @@ -503,6 +508,7 @@ tcp_port_base_for_broker(Config, I, PortsCount) -> tcp_port_base_for_broker1(Base, I, PortsCount) -> Base + I * PortsCount * ?NODE_START_ATTEMPTS. +%% @todo Refactor to simplify this... update_tcp_ports_in_rmq_config(NodeConfig, [tcp_port_amqp = Key | Rest]) -> NodeConfig1 = rabbit_ct_helpers:merge_app_env(NodeConfig, {rabbit, [{tcp_listeners, [?config(Key, NodeConfig)]}]}), @@ -623,21 +629,52 @@ write_config_file(Config, NodeConfig, _I) -> ConfigFile ++ "\": " ++ file:format_error(Reason)} end. +-define(REQUIRED_FEATURE_FLAGS, [ + %% Required in 3.11: + "virtual_host_metadata," + "quorum_queue," + "implicit_default_bindings," + "maintenance_mode_status," + "user_limits," + %% Required in 3.12: + "stream_queue," + "classic_queue_type_delivery_support," + "tracking_records_in_ets," + "stream_single_active_consumer," + "listener_records_in_ets," + "feature_flags_v2," + "direct_exchange_routing_v2," + "classic_mirrored_queue_version," %% @todo Missing in FF docs!! + %% Required in 3.12 in rabbitmq_management_agent: +% "drop_unroutable_metric," +% "empty_basic_get_metric," + %% Required in 4.0: + "stream_sac_coordinator_unblock_group," + "restart_streams," + "stream_update_config_command," + "stream_filtering," + "message_containers" %% @todo Update FF docs!! It *is* required. +]). + do_start_rabbitmq_node(Config, NodeConfig, I) -> WithPlugins0 = rabbit_ct_helpers:get_config(Config, - broker_with_plugins), + broker_with_plugins), %% @todo This is probably not used. 
WithPlugins = case is_list(WithPlugins0) of true -> lists:nth(I + 1, WithPlugins0); false -> WithPlugins0 end, ForceUseSecondary = rabbit_ct_helpers:get_config( - Config, force_secondary_umbrella, undefined), + Config, force_secondary, undefined), CanUseSecondary = case ForceUseSecondary of undefined -> (I + 1) rem 2 =:= 0; Override when is_boolean(Override) -> Override end, + UseSecondaryDist = case ?config(secondary_dist, Config) of + false -> false; + _ -> CanUseSecondary + end, UseSecondaryUmbrella = case ?config(secondary_umbrella, Config) of false -> false; _ -> CanUseSecondary @@ -669,25 +706,9 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> DistArg = re:replace(DistModS, "_dist$", "", [{return, list}]), "-pa \"" ++ DistModPath ++ "\" -proto_dist " ++ DistArg end, - %% Set the net_ticktime. - CurrentTicktime = case net_kernel:get_net_ticktime() of - {ongoing_change_to, T} -> T; - T -> T - end, - StartArgs1 = case rabbit_ct_helpers:get_config(Config, net_ticktime) of - undefined -> - case CurrentTicktime of - 60 -> ok; - _ -> net_kernel:set_net_ticktime(60) - end, - StartArgs0; - Ticktime -> - case CurrentTicktime of - Ticktime -> ok; - _ -> net_kernel:set_net_ticktime(Ticktime) - end, - StartArgs0 ++ " -kernel net_ticktime " ++ integer_to_list(Ticktime) - end, + %% Set the net_ticktime to 5s for all nodes (including CT via CT_OPTS). + %% A lower tick time helps trigger distribution failures faster. 
+ StartArgs1 = StartArgs0 ++ " -kernel net_ticktime 5", ExtraArgs0 = [], ExtraArgs1 = case rabbit_ct_helpers:get_config(Config, rmq_plugins_dir) of undefined -> @@ -699,8 +720,10 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> StartWithPluginsDisabled = rabbit_ct_helpers:get_config( Config, start_rmq_with_plugins_disabled), ExtraArgs2 = case StartWithPluginsDisabled of - true -> ["LEAVE_PLUGINS_DISABLED=yes" | ExtraArgs1]; - _ -> ExtraArgs1 + true -> + ["LEAVE_PLUGINS_DISABLED=1" | ExtraArgs1]; + _ -> + ExtraArgs1 end, KeepPidFile = rabbit_ct_helpers:get_config( Config, keep_pid_file_on_exit), @@ -744,7 +767,30 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> {"RABBITMQ_PLUGINS=~ts/rabbitmq-plugins", [SecScriptsDir]} | ExtraArgs4]; false -> - ExtraArgs4 + case UseSecondaryDist of + true -> + SecondaryDist = ?config(secondary_dist, Config), + SecondaryEnabledPlugins = case { + StartWithPluginsDisabled, + ?config(secondary_enabled_plugins, Config), + filename:basename(SrcDir) + } of + {true, _, _} -> ""; + {_, undefined, "rabbit"} -> ""; + {_, undefined, SrcPlugin} -> SrcPlugin; + {_, SecondaryEnabledPlugins0, _} -> SecondaryEnabledPlugins0 + end, + [{"DIST_DIR=~ts/plugins", [SecondaryDist]}, + {"CLI_SCRIPTS_DIR=~ts/sbin", [SecondaryDist]}, + {"CLI_ESCRIPTS_DIR=~ts/escript", [SecondaryDist]}, + {"RABBITMQ_SCRIPTS_DIR=~ts/sbin", [SecondaryDist]}, + {"RABBITMQ_SERVER=~ts/sbin/rabbitmq-server", [SecondaryDist]}, + {"RABBITMQ_ENABLED_PLUGINS=~ts", [SecondaryEnabledPlugins]}, + {"RABBITMQ_FEATURE_FLAGS=~ts", [?REQUIRED_FEATURE_FLAGS]} + | ExtraArgs4]; + false -> + ExtraArgs4 + end end, MakeVars = [ {"RABBITMQ_NODENAME=~ts", [Nodename]}, @@ -754,7 +800,6 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> {"RABBITMQ_SERVER_START_ARGS=~ts", [StartArgs1]}, {"RABBITMQ_SERVER_ADDITIONAL_ERL_ARGS=+S 2 +sbwt very_short +A 24 ~ts", [AdditionalErlArgs]}, "RABBITMQ_LOG=debug", - "RMQCTL_WAIT_TIMEOUT=180", {"TEST_TMPDIR=~ts", [PrivDir]} | ExtraArgs], Cmd = 
["start-background-broker" | MakeVars], @@ -770,6 +815,7 @@ do_start_rabbitmq_node(Config, NodeConfig, I) -> _ -> AbortCmd = ["stop-node" | MakeVars], _ = rabbit_ct_helpers:make(Config, SrcDir, AbortCmd), + %% @todo Need to stop all nodes in the cluster, not just the one node. {skip, "Failed to initialize RabbitMQ"} end; RunCmd -> @@ -832,7 +878,7 @@ query_node(Config, NodeConfig) -> %% 3.7.x node. If this is the case, we can ignore %% this and leave the `enabled_plugins_file` config %% variable unset. - ct:pal("NO RABBITMQ_FEATURE_FLAGS_FILE"), + ct:log("NO RABBITMQ_FEATURE_FLAGS_FILE"), Vars0 end, cover_add_node(Nodename), @@ -896,7 +942,7 @@ handle_nodes_in_parallel(NodeConfigs, Fun) -> T1 = erlang:monotonic_time(), Ret = Fun(NodeConfig), T2 = erlang:monotonic_time(), - ct:pal( + ct:log( ?LOW_IMPORTANCE, "Time to run ~tp for node ~ts: ~b us", [Fun, @@ -914,7 +960,7 @@ handle_nodes_in_parallel(NodeConfigs, Fun) -> wait_for_node_handling([], Fun, T0, Results) -> T3 = erlang:monotonic_time(), - ct:pal( + ct:log( ?LOW_IMPORTANCE, "Time to run ~tp for all nodes: ~b us", [Fun, erlang:convert_time_unit(T3 - T0, native, microsecond)]), @@ -929,7 +975,7 @@ wait_for_node_handling(Procs, Fun, T0, Results) -> move_nonworking_nodedir_away(NodeConfig) -> ConfigFile = ?config(erlang_node_config_filename, NodeConfig), ConfigDir = filename:dirname(ConfigFile), - case os:getenv("RABBITMQ_CT_HELPERS_DELETE_UNUSED_NODES") =/= false + ok = case os:getenv("RABBITMQ_CT_HELPERS_DELETE_UNUSED_NODES") =/= false andalso ?OTP_RELEASE >= 23 of true -> file:del_dir_r(ConfigDir); @@ -969,7 +1015,7 @@ configured_metadata_store(Config) -> end. 
configure_metadata_store(Config) -> - ct:pal("Configuring metadata store..."), + ct:log("Configuring metadata store..."), case configured_metadata_store(Config) of {khepri, FFs0} -> case enable_khepri_metadata_store(Config, FFs0) of @@ -980,12 +1026,12 @@ configure_metadata_store(Config) -> Config1 end; mnesia -> - ct:pal("Enabling Mnesia metadata store"), + ct:log("Enabling Mnesia metadata store"), Config end. enable_khepri_metadata_store(Config, FFs0) -> - ct:pal("Enabling Khepri metadata store"), + ct:log("Enabling Khepri metadata store"), FFs = [khepri_db | FFs0], lists:foldl(fun(_FF, {skip, _Reason} = Skip) -> Skip; @@ -1147,7 +1193,7 @@ stop_rabbitmq_node(Config, NodeConfig) -> {"RABBITMQ_NODENAME_FOR_PATHS=~ts", [InitialNodename]} ], Cmd = ["stop-node" | MakeVars], - case rabbit_ct_helpers:get_config(Config, rabbitmq_run_cmd) of + _ = case rabbit_ct_helpers:get_config(Config, rabbitmq_run_cmd) of undefined -> rabbit_ct_helpers:make(Config, SrcDir, Cmd); RunCmd -> @@ -1156,7 +1202,7 @@ stop_rabbitmq_node(Config, NodeConfig) -> NodeConfig. find_crashes_in_logs(NodeConfigs, IgnoredCrashes) -> - ct:pal( + ct:log( "Looking up any crash reports in the nodes' log files. If we find " "some, they will appear below:"), CrashesCount = lists:foldl( @@ -1165,7 +1211,11 @@ find_crashes_in_logs(NodeConfigs, IgnoredCrashes) -> NodeConfig, IgnoredCrashes), Total + Count end, 0, NodeConfigs), - ct:pal("Found ~b crash report(s)", [CrashesCount]), + LogFn = case CrashesCount of + 0 -> log; + _ -> pal + end, + ct:LogFn("Found ~b crash report(s)", [CrashesCount]), ?assertEqual(0, CrashesCount). 
count_crashes_in_logs(NodeConfig, IgnoredCrashes) -> @@ -1294,6 +1344,10 @@ rabbitmqctl(Config, Node, Args, Timeout) -> CanUseSecondary = (I + 1) rem 2 =:= 0, BazelRunSecCmd = rabbit_ct_helpers:get_config( Config, rabbitmq_run_secondary_cmd), + UseSecondaryDist = case ?config(secondary_dist, Config) of + false -> false; + _ -> CanUseSecondary + end, UseSecondaryUmbrella = case ?config(secondary_umbrella, Config) of false -> case BazelRunSecCmd of @@ -1336,7 +1390,14 @@ rabbitmqctl(Config, Node, Args, Timeout) -> "rabbitmqctl"]) end; false -> - ?config(rabbitmqctl_cmd, Config) + case UseSecondaryDist of + true -> + SecondaryDist = ?config(secondary_dist, Config), + rabbit_misc:format( + "~ts/sbin/rabbitmqctl", [SecondaryDist]); + false -> + ?config(rabbitmqctl_cmd, Config) + end end, NodeConfig = get_node_config(Config, Node), @@ -1929,10 +1990,8 @@ restart_node(Config, Node) -> stop_node(Config, Node) -> NodeConfig = get_node_config(Config, Node), - case stop_rabbitmq_node(Config, NodeConfig) of - {skip, _} = Error -> Error; - _ -> ok - end. + _ = stop_rabbitmq_node(Config, NodeConfig), + ok. stop_node_after(Config, Node, Sleep) -> timer:sleep(Sleep), @@ -1955,7 +2014,7 @@ kill_node(Config, Node) -> _ -> rabbit_misc:format("kill -9 ~ts", [Pid]) end, - os:cmd(Cmd), + _ = os:cmd(Cmd), await_os_pid_death(Pid). kill_node_after(Config, Node, Sleep) -> @@ -2246,7 +2305,7 @@ if_cover(F) -> os:getenv("COVERAGE") } of {false, false} -> ok; - _ -> F() + _ -> _ = F(), ok end. 
setup_meck(Config) -> diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl index 7baee0264bb8..09c9b6108734 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_config_schema.erl @@ -24,7 +24,7 @@ init_schemas(App, Config) -> run_snippets(Config) -> {ok, [Snippets]} = file:consult(?config(conf_snippets, Config)), ct:pal("Loaded config schema snippets: ~tp", [Snippets]), - lists:map( + lists:foreach( fun({N, S, C, P}) -> ok = test_snippet(Config, {snippet_id(N), S, []}, C, P, true); ({N, S, A, C, P}) -> @@ -70,12 +70,12 @@ test_snippet(Config, Snippet = {SnipID, _, _}, Expected, _Plugins, Sort) -> write_snippet(Config, {Name, Conf, Advanced}) -> ResultsDir = ?config(results_dir, Config), - file:make_dir(filename:join(ResultsDir, Name)), + _ = file:make_dir(filename:join(ResultsDir, Name)), ConfFile = filename:join([ResultsDir, Name, "config.conf"]), AdvancedFile = filename:join([ResultsDir, Name, "advanced.config"]), - file:write_file(ConfFile, Conf), - rabbit_file:write_term_file(AdvancedFile, [Advanced]), + ok = file:write_file(ConfFile, Conf), + ok = rabbit_file:write_term_file(AdvancedFile, [Advanced]), {ConfFile, AdvancedFile}. generate_config(ConfFile, AdvancedFile) -> diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl index ee109b9f9c56..f0e7490c60bb 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_helpers.erl @@ -66,7 +66,7 @@ log_environment() -> Vars = lists:sort(fun(A, B) -> A =< B end, os:getenv()), - ct:pal(?LOW_IMPORTANCE, "Environment variables:~n~ts", + ct:log(?LOW_IMPORTANCE, "Environment variables:~n~ts", [[io_lib:format(" ~ts~n", [V]) || V <- Vars]]). 
run_setup_steps(Config) -> @@ -78,6 +78,7 @@ run_setup_steps(Config, ExtraSteps) -> [ fun init_skip_as_error_flag/1, fun guess_tested_erlang_app_name/1, + fun ensure_secondary_dist/1, fun ensure_secondary_umbrella/1, fun ensure_current_srcdir/1, fun ensure_rabbitmq_ct_helpers_srcdir/1, @@ -152,13 +153,13 @@ run_steps(Config, []) -> Config. redirect_logger_to_ct_logs(Config) -> - ct:pal( + ct:log( ?LOW_IMPORTANCE, "Configuring logger to send logs to common_test logs"), - logger:set_handler_config(cth_log_redirect, level, debug), + ok = logger:set_handler_config(cth_log_redirect, level, debug), %% Let's use the same format as RabbitMQ itself. - logger:set_handler_config( + ok = logger:set_handler_config( cth_log_redirect, formatter, rabbit_prelaunch_early_logging:default_file_formatter(#{})), @@ -170,9 +171,9 @@ redirect_logger_to_ct_logs(Config) -> cth_log_redirect_any_domains, cth_log_redirect_any_domains, LogCfg), - logger:remove_handler(default), + ok = logger:remove_handler(default), - ct:pal( + ct:log( ?LOW_IMPORTANCE, "Logger configured to send logs to common_test logs; you should see " "a message below saying so"), @@ -201,6 +202,18 @@ guess_tested_erlang_app_name(Config) -> set_config(Config, {tested_erlang_app, list_to_atom(AppName)}) end. +ensure_secondary_dist(Config) -> + Path = case get_config(Config, secondary_dist) of + undefined -> os:getenv("SECONDARY_DIST"); + P -> P + end, + %% Hard fail if the path is invalid. + case Path =:= false orelse filelib:is_dir(Path) of + true -> ok; + false -> error(secondary_dist_path_invalid) + end, + set_config(Config, {secondary_dist, Path}). 
+ ensure_secondary_umbrella(Config) -> Path = case get_config(Config, secondary_umbrella) of undefined -> os:getenv("SECONDARY_UMBRELLA"); @@ -433,12 +446,12 @@ ensure_rabbitmqctl_cmd(Config) -> false -> find_script(Config, "rabbitmqctl"); R -> - ct:pal(?LOW_IMPORTANCE, + ct:log(?LOW_IMPORTANCE, "Using rabbitmqctl from RABBITMQCTL: ~tp~n", [R]), R end; R -> - ct:pal(?LOW_IMPORTANCE, + ct:log(?LOW_IMPORTANCE, "Using rabbitmqctl from rabbitmqctl_cmd: ~tp~n", [R]), R end, @@ -470,7 +483,7 @@ find_script(Config, Script) -> filelib:is_file(File)], case Locations of [Location | _] -> - ct:pal(?LOW_IMPORTANCE, "Using ~ts at ~tp~n", [Script, Location]), + ct:log(?LOW_IMPORTANCE, "Using ~ts at ~tp~n", [Script, Location]), Location; [] -> false @@ -555,7 +568,7 @@ ensure_rabbitmq_queues_cmd(Config) -> R -> R end; R -> - ct:pal(?LOW_IMPORTANCE, + ct:log(?LOW_IMPORTANCE, "Using rabbitmq-queues from rabbitmq_queues_cmd: ~tp~n", [R]), R end, @@ -659,12 +672,12 @@ symlink_priv_dir(Config) -> Target = filename:join([SrcDir, "logs", Name]), case exec(["ln", "-snf", PrivDir, Target]) of {ok, _} -> ok; - _ -> ct:pal(?LOW_IMPORTANCE, + _ -> ct:log(?LOW_IMPORTANCE, "Failed to symlink private_log directory.") end, Config; not_found -> - ct:pal(?LOW_IMPORTANCE, + ct:log(?LOW_IMPORTANCE, "Failed to symlink private_log directory."), Config end @@ -689,9 +702,8 @@ load_elixir(Config) -> {skip, _} = Skip -> Skip; ElixirLibDir -> - ct:pal(?LOW_IMPORTANCE, "Elixir lib dir: ~ts~n", [ElixirLibDir]), + ct:log(?LOW_IMPORTANCE, "Elixir lib dir: ~ts~n", [ElixirLibDir]), true = code:add_pathz(ElixirLibDir), - application:load(elixir), {ok, _} = application:ensure_all_started(elixir), Config end. 
@@ -726,14 +738,18 @@ long_running_testsuite_monitor(TimerRef, Testcases) -> long_running_testsuite_monitor(TimerRef, Testcases1); ping_ct -> T1 = erlang:monotonic_time(seconds), - ct:pal(?STD_IMPORTANCE, "Testcases still in progress:~ts", - [[ + InProgress = [ begin TDiff = format_time_diff(T1, T0), rabbit_misc:format("~n - ~ts (~ts)", [TC, TDiff]) end || {TC, T0} <- Testcases - ]]), + ], + case InProgress of + [] -> ok; + _ -> ct:pal(?STD_IMPORTANCE, "Testcases still in progress:~ts", + [InProgress]) + end, long_running_testsuite_monitor(TimerRef, Testcases); stop -> timer:cancel(TimerRef) @@ -911,7 +927,7 @@ exec([Cmd | Args], Options) when is_list(Cmd) orelse is_binary(Cmd) -> %% Because Args1 may contain binaries, we don't use string:join(). %% Instead we do a list comprehension. ArgsIoList = [Cmd1, [[$\s, Arg] || Arg <- Args1]], - ct:pal(?LOW_IMPORTANCE, Log1, [ArgsIoList, self()]), + ct:log(?LOW_IMPORTANCE, Log1, [ArgsIoList, self()]), try Port = erlang:open_port( {spawn_executable, Cmd1}, [ @@ -952,15 +968,15 @@ port_receive_loop(Port, Stdout, Options, Until, DumpTimer) -> end, receive {Port, {exit_status, X}} -> - timer:cancel(DumpTimer), + _ = timer:cancel(DumpTimer), DropStdout = lists:member(drop_stdout, Options) orelse Stdout =:= "", if DropStdout -> - ct:pal(?LOW_IMPORTANCE, "Exit code: ~tp (pid ~tp)", + ct:log(?LOW_IMPORTANCE, "Exit code: ~tp (pid ~tp)", [X, self()]); true -> - ct:pal(?LOW_IMPORTANCE, "~ts~nExit code: ~tp (pid ~tp)", + ct:log(?LOW_IMPORTANCE, "~ts~nExit code: ~tp (pid ~tp)", [Stdout, X, self()]) end, case proplists:get_value(match_stdout, Options) of @@ -982,7 +998,7 @@ port_receive_loop(Port, Stdout, Options, Until, DumpTimer) -> DropStdout -> ok; true -> - ct:pal(?LOW_IMPORTANCE, "~ts~n[Command still in progress] (pid ~tp)", + ct:log(?LOW_IMPORTANCE, "~ts~n[Command still in progress] (pid ~tp)", [Stdout, self()]) end, port_receive_loop(Port, Stdout, Options, Until, stdout_dump_timer()); @@ -1062,11 +1078,13 @@ 
convert_to_unicode_binary(Arg) when is_binary(Arg) -> Arg. is_mixed_versions() -> - os:getenv("SECONDARY_UMBRELLA") =/= false + os:getenv("SECONDARY_DIST") =/= false + orelse os:getenv("SECONDARY_UMBRELLA") =/= false orelse os:getenv("RABBITMQ_RUN_SECONDARY") =/= false. is_mixed_versions(Config) -> - get_config(Config, secondary_umbrella, false) =/= false + get_config(Config, secondary_dist, false) =/= false + orelse get_config(Config, secondary_umbrella, false) =/= false orelse get_config(Config, rabbitmq_run_secondary_cmd, false) =/= false. %% ------------------------------------------------------------------- @@ -1107,7 +1125,7 @@ eventually({Line, Assertion} = TestObj, PollInterval, PollCount) ok -> ok; Err -> - ct:pal(?LOW_IMPORTANCE, + ct:log(?LOW_IMPORTANCE, "Retrying in ~bms for ~b more times due to failed assertion in line ~b: ~tp", [PollInterval, PollCount - 1, Line, Err]), timer:sleep(PollInterval), diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl index b98cb0dd862a..490ccda377f7 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_ct_vm_helpers.erl @@ -499,7 +499,7 @@ spawn_terraform_vms(Config) -> rabbit_ct_helpers:register_teardown_steps( Config1, teardown_steps()); _ -> - destroy_terraform_vms(Config), + _ = destroy_terraform_vms(Config), {skip, "Terraform failed to spawn VM"} end. @@ -520,7 +520,7 @@ destroy_terraform_vms(Config) -> ] ++ TfVarFlags ++ [ TfConfigDir ], - rabbit_ct_helpers:exec(Cmd, [{env, Env}]), + {ok, _} = rabbit_ct_helpers:exec(Cmd, [{env, Env}]), Config. 
terraform_var_flags(Config) -> @@ -696,7 +696,7 @@ ensure_instance_count(Config, TRef) -> poll_vms(Config) end; true -> - timer:cancel(TRef), + _ = timer:cancel(TRef), rabbit_ct_helpers:set_config(Config, {terraform_poll_done, true}) end; @@ -760,7 +760,7 @@ initialize_ct_peers(Config, NodenamesMap, IPAddrsMap) -> set_inet_hosts(Config) -> CTPeers = get_ct_peer_entries(Config), inet_db:set_lookup([file, native]), - [begin + _ = [begin Hostname = ?config(hostname, CTPeerConfig), IPAddr = ?config(ipaddr, CTPeerConfig), inet_db:add_host(IPAddr, [Hostname]), @@ -831,7 +831,7 @@ wait_for_ct_peers(Config, [CTPeer | Rest] = CTPeers, TRef) -> end end; wait_for_ct_peers(Config, [], TRef) -> - timer:cancel(TRef), + _ = timer:cancel(TRef), Config. set_ct_peers_code_path(Config) -> @@ -864,7 +864,7 @@ download_dirs(Config) -> ?MODULE, prepare_dirs_to_download_archives, [Config]), - inets:start(), + _ = inets:start(), download_dirs(Config, ConfigsPerCTPeer). download_dirs(_, [{skip, _} = Error | _]) -> @@ -964,7 +964,7 @@ add_archive_to_list(Config, Archive) -> start_http_server(Config) -> PrivDir = ?config(priv_dir, Config), {ok, Hostname} = inet:gethostname(), - inets:start(), + _ = inets:start(), Options = [{port, 0}, {server_name, Hostname}, {server_root, PrivDir}, @@ -1021,7 +1021,8 @@ do_setup_ct_logs_proxies(Nodes) -> [begin user_io_proxy(Node), ct_logs_proxy(Node) - end || Node <- Nodes]. + end || Node <- Nodes], + ok. user_io_proxy(Node) -> ok = setup_proxy(Node, user). 
diff --git a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl index ca606adf9530..20b833194624 100644 --- a/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl +++ b/deps/rabbitmq_ct_helpers/src/rabbit_mgmt_test_util.erl @@ -107,7 +107,7 @@ uri_base_from(Config, Node) -> uri_base_from(Config, Node, Base) -> Port = mgmt_port(Config, Node), Prefix = get_uri_prefix(Config), - Uri = rabbit_mgmt_format:print("http://localhost:~w~ts/~ts", [Port, Prefix, Base]), + Uri = list_to_binary(lists:flatten(io_lib:format("http://localhost:~w~ts/~ts", [Port, Prefix, Base]))), binary_to_list(Uri). get_uri_prefix(Config) -> diff --git a/deps/rabbitmq_mqtt/BUILD.bazel b/deps/rabbitmq_mqtt/BUILD.bazel index 71c5d7e1f68c..ca0c97809625 100644 --- a/deps/rabbitmq_mqtt/BUILD.bazel +++ b/deps/rabbitmq_mqtt/BUILD.bazel @@ -225,7 +225,7 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "shared_SUITE", + name = "mqtt_shared_SUITE", size = "large", additional_beam = [ ":test_util_beam", diff --git a/deps/rabbitmq_mqtt/Makefile b/deps/rabbitmq_mqtt/Makefile index 64bfb24e5116..c8ebda54547b 100644 --- a/deps/rabbitmq_mqtt/Makefile +++ b/deps/rabbitmq_mqtt/Makefile @@ -42,7 +42,7 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = ssl DEPS = ranch rabbit_common rabbit amqp10_common -TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_web_mqtt amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp_client rabbitmq_stomp rabbitmq_stream +TEST_DEPS = emqtt ct_helper rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management amqp_client rabbitmq_consistent_hash_exchange rabbitmq_amqp_client rabbitmq_stomp rabbitmq_stream PLT_APPS += rabbitmqctl elixir @@ -58,3 +58,92 @@ include ../../erlang.mk clean:: if test -d test/java_SUITE_data; then cd test/java_SUITE_data && $(MAKE) clean; fi + +# Parallel CT. +# +# @todo Move most of this in common files. 
+ +define ct_master.erl + StartOpts = #{ + host => "localhost", + connection => standard_io, + args => ["-hidden"] + }, + {ok, Pid1, _} = peer:start(StartOpts#{name => "rabbit_shard1"}), + {ok, Pid2, _} = peer:start(StartOpts#{name => "rabbit_shard2"}), + {ok, Pid3, _} = peer:start(StartOpts#{name => "rabbit_shard3"}), + {ok, Pid4, _} = peer:start(StartOpts#{name => "rabbit_shard4"}), + peer:call(Pid1, net_kernel, set_net_ticktime, [5]), + peer:call(Pid2, net_kernel, set_net_ticktime, [5]), + peer:call(Pid3, net_kernel, set_net_ticktime, [5]), + peer:call(Pid4, net_kernel, set_net_ticktime, [5]), + peer:call(Pid1, persistent_term, put, [rabbit_ct_tcp_port_base, 23000]), + peer:call(Pid2, persistent_term, put, [rabbit_ct_tcp_port_base, 25000]), + peer:call(Pid3, persistent_term, put, [rabbit_ct_tcp_port_base, 27000]), + peer:call(Pid4, persistent_term, put, [rabbit_ct_tcp_port_base, 29000]), + [{[_], {ok, Results}}] = ct_master_fork:run("$1"), + peer:stop(Pid4), + peer:stop(Pid3), + peer:stop(Pid2), + peer:stop(Pid1), + lists:foldl(fun + ({_, {_, 0, {_, 0}}}, Err) -> Err + 1; + (What, Peer) -> halt(Peer) + end, 1, Results), + halt(0) +endef + +PARALLEL_CT_SET_1_A = auth retainer +PARALLEL_CT_SET_1_B = cluster command config config_schema mc_mqtt packet_prop \ + processor protocol_interop proxy_protocol rabbit_mqtt_confirms reader util +PARALLEL_CT_SET_1_C = java v5 +PARALLEL_CT_SET_1_D = mqtt_shared + +PARALLEL_CT_SUITES = $(PARALLEL_CT_SET_1_A) $(PARALLEL_CT_SET_1_B) $(PARALLEL_CT_SET_1_C) $(PARALLEL_CT_SET_1_D) + +ifeq ($(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES)),) +parallel-ct-sanity-check: + $(verbose) : +else +parallel-ct-sanity-check: + $(verbose) printf "%s\n" \ + "In order for new test suites to be run in CI, the test suites" \ + "must be added to one of the PARALLEL_CT_SET__ variables." 
\ + "" \ + "The following test suites are missing:" \ + "$(filter-out $(SEQUENTIAL_CT_SUITES) $(PARALLEL_CT_SUITES),$(CT_SUITES))" + $(verbose) exit 1 +endif + +define tpl_parallel_ct_test_spec +{logdir, "$(CT_LOGS_DIR)"}. +{logdir, master, "$(CT_LOGS_DIR)"}. +{create_priv_dir, all_nodes, auto_per_run}. +{auto_compile, false}. + +{node, shard1, 'rabbit_shard1@localhost'}. +{node, shard2, 'rabbit_shard2@localhost'}. +{node, shard3, 'rabbit_shard3@localhost'}. +{node, shard4, 'rabbit_shard4@localhost'}. + +{define, 'Set1', [$(call comma_list,$(addsuffix _SUITE,$1))]}. +{define, 'Set2', [$(call comma_list,$(addsuffix _SUITE,$2))]}. +{define, 'Set3', [$(call comma_list,$(addsuffix _SUITE,$3))]}. +{define, 'Set4', [$(call comma_list,$(addsuffix _SUITE,$4))]}. + +{suites, shard1, "test/", 'Set1'}. +{suites, shard2, "test/", 'Set2'}. +{suites, shard3, "test/", 'Set3'}. +{suites, shard4, "test/", 'Set4'}. +endef + +define parallel_ct_set_target +tpl_parallel_ct_test_spec_set_$1 = $$(call tpl_parallel_ct_test_spec,$(PARALLEL_CT_SET_$(1)_A),$(PARALLEL_CT_SET_$(1)_B),$(PARALLEL_CT_SET_$(1)_C),$(PARALLEL_CT_SET_$(1)_D)) + +parallel-ct-set-$(1): test-build + $(verbose) mkdir -p $(CT_LOGS_DIR) + $(verbose) $$(call core_render,tpl_parallel_ct_test_spec_set_$(1),ct.set-$(1).spec) + $$(call erlang,$$(call ct_master.erl,ct.set-$(1).spec),-sname parallel_ct_$(PROJECT)@localhost -hidden -kernel net_ticktime 5) +endef + +$(foreach set,1,$(eval $(call parallel_ct_set_target,$(set)))) diff --git a/deps/rabbitmq_mqtt/test/command_SUITE.erl b/deps/rabbitmq_mqtt/test/command_SUITE.erl index 528c4b0b1b97..864727077c40 100644 --- a/deps/rabbitmq_mqtt/test/command_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/command_SUITE.erl @@ -85,12 +85,6 @@ run(Config) -> %% No connections [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), - %% Open a WebMQTT connection, command won't list it - WebMqttConfig = [{websocket, true} | Config], - _C0 = connect(<<"simpleWebMqttClient">>, WebMqttConfig, 
[{ack_timeout, 1}]), - - [] = 'Elixir.Enum':to_list(?COMMAND:run([], Opts)), - %% Open a connection C1 = connect(<<"simpleClient">>, Config, [{ack_timeout, 1}]), diff --git a/deps/rabbitmq_mqtt/test/shared_SUITE.erl b/deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl similarity index 99% rename from deps/rabbitmq_mqtt/test/shared_SUITE.erl rename to deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl index 2101d9039c26..4f83b55f1fbf 100644 --- a/deps/rabbitmq_mqtt/test/shared_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/mqtt_shared_SUITE.erl @@ -10,7 +10,9 @@ %% %% In other words, this test suite should not contain any test case that is executed %% only with a particular plugin or particular MQTT version. --module(shared_SUITE). +%% +%% When adding a test case here the same function must be defined in web_mqtt_shared_SUITE. +-module(mqtt_shared_SUITE). -compile([export_all, nowarn_export_all]). @@ -53,23 +55,13 @@ -define(RC_SESSION_TAKEN_OVER, 16#8E). all() -> - [{group, mqtt}, - {group, web_mqtt}]. + [{group, mqtt}]. %% The code being tested under v3 and v4 is almost identical. %% To save time in CI, we therefore run only a very small subset of tests in v3. groups() -> [ {mqtt, [], - [{cluster_size_1, [], - [{v3, [], cluster_size_1_tests_v3()}, - {v4, [], cluster_size_1_tests()}, - {v5, [], cluster_size_1_tests()}]}, - {cluster_size_3, [], - [{v4, [], cluster_size_3_tests()}, - {v5, [], cluster_size_3_tests()}]} - ]}, - {web_mqtt, [], [{cluster_size_1, [], [{v3, [], cluster_size_1_tests_v3()}, {v4, [], cluster_size_1_tests()}, @@ -173,9 +165,6 @@ end_per_suite(Config) -> init_per_group(mqtt, Config) -> rabbit_ct_helpers:set_config(Config, {websocket, false}); -init_per_group(web_mqtt, Config) -> - rabbit_ct_helpers:set_config(Config, {websocket, true}); - init_per_group(Group, Config) when Group =:= v3; Group =:= v4; @@ -216,8 +205,6 @@ init_per_testcase(Testcase, Config) -> init_per_testcase0(Testcase, Config). 
init_per_testcase0(Testcase, Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:enable_plugin(Config, N, rabbitmq_web_mqtt) || N <- Nodes], rabbit_ct_helpers:testcase_started(Config, Testcase). end_per_testcase(T, Config) diff --git a/deps/rabbitmq_mqtt/test/v5_SUITE.erl b/deps/rabbitmq_mqtt/test/v5_SUITE.erl index 8b252003b3c3..fa977fd3a24d 100644 --- a/deps/rabbitmq_mqtt/test/v5_SUITE.erl +++ b/deps/rabbitmq_mqtt/test/v5_SUITE.erl @@ -42,16 +42,11 @@ -define(RC_TOPIC_ALIAS_INVALID, 16#94). all() -> - [{group, mqtt}, - {group, web_mqtt}]. + [{group, mqtt}]. groups() -> [ {mqtt, [], - [{cluster_size_1, [shuffle], cluster_size_1_tests()}, - {cluster_size_3, [shuffle], cluster_size_3_tests()} - ]}, - {web_mqtt, [], [{cluster_size_1, [shuffle], cluster_size_1_tests()}, {cluster_size_3, [shuffle], cluster_size_3_tests()} ]} @@ -153,9 +148,6 @@ end_per_suite(Config) -> init_per_group(mqtt, Config) -> rabbit_ct_helpers:set_config(Config, {websocket, false}); -init_per_group(web_mqtt, Config) -> - rabbit_ct_helpers:set_config(Config, {websocket, true}); - init_per_group(Group, Config0) -> Nodes = case Group of cluster_size_1 -> 1; @@ -198,8 +190,6 @@ init_per_testcase(T, Config) -> init_per_testcase0(T, Config). init_per_testcase0(Testcase, Config) -> - Nodes = rabbit_ct_broker_helpers:get_node_configs(Config, nodename), - [ok = rabbit_ct_broker_helpers:enable_plugin(Config, N, rabbitmq_web_mqtt) || N <- Nodes], rabbit_ct_helpers:testcase_started(Config, Testcase). 
end_per_testcase(T, Config) diff --git a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl index 9a0fc9da426f..082c5c09c7bc 100644 --- a/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl +++ b/deps/rabbitmq_peer_discovery_etcd/src/rabbitmq_peer_discovery_etcd_v3_client.erl @@ -87,7 +87,7 @@ callback_mode() -> [state_functions, state_enter]. terminate(Reason, State, Data) -> rabbit_log:debug("etcd v3 API client will terminate in state ~tp, reason: ~tp", [State, Reason]), - disconnect(?ETCD_CONN_NAME, Data), + _ = disconnect(?ETCD_CONN_NAME, Data), rabbit_log:debug("etcd v3 API client has disconnected"), rabbit_log:debug("etcd v3 API client: total number of connections to etcd is ~tp", [length(eetcd_conn_sup:info())]), ok. @@ -157,13 +157,13 @@ recover(internal, start, Data = #statem_data{endpoints = Endpoints, connection_m }}; {error, Errors} -> [rabbit_log:error("etcd peer discovery: failed to connect to endpoint ~tp: ~tp", [Endpoint, Err]) || {Endpoint, Err} <- Errors], - ensure_disconnected(?ETCD_CONN_NAME, Data), + _ = ensure_disconnected(?ETCD_CONN_NAME, Data), Actions = [{state_timeout, reconnection_interval(), recover}], {keep_state, reset_statem_data(Data), Actions} end; recover(state_timeout, _PrevState, Data) -> rabbit_log:debug("etcd peer discovery: connection entered a reconnection delay state"), - ensure_disconnected(?ETCD_CONN_NAME, Data), + _ = ensure_disconnected(?ETCD_CONN_NAME, Data), {next_state, recover, reset_statem_data(Data)}; recover({call, From}, Req, _Data) -> rabbit_log:error("etcd v3 API: client received a call ~tp while not connected, will do nothing", [Req]), diff --git a/deps/rabbitmq_prelaunch/Makefile b/deps/rabbitmq_prelaunch/Makefile index 38c4b940ab3e..ee82d02d3c39 100644 --- a/deps/rabbitmq_prelaunch/Makefile +++ b/deps/rabbitmq_prelaunch/Makefile @@ -3,9 +3,9 @@ PROJECT_DESCRIPTION = 
RabbitMQ prelaunch setup PROJECT_VERSION = 4.0.0 PROJECT_MOD = rabbit_prelaunch_app -DEPS = rabbit_common cuttlefish thoas +DEPS = rabbit_common cuttlefish thoas osiris systemd -PLT_APPS += runtime_tools eunit osiris systemd +PLT_APPS += runtime_tools eunit DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk diff --git a/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl index c2f27226a1c5..07fcd86a7f10 100644 --- a/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl +++ b/deps/rabbitmq_prelaunch/src/rabbit_prelaunch_conf.erl @@ -11,9 +11,8 @@ generate_config_from_cuttlefish_files/3, decrypt_config/1]). --ifdef(TEST). +%% Only used in tests. -export([decrypt_config/2]). --endif. %% These can be removed when we only support OTP-26+. -ignore_xref([{user_drv, whereis_group, 0}, diff --git a/deps/rabbitmq_stream_common/Makefile b/deps/rabbitmq_stream_common/Makefile index 914a868f1c7c..a6b7c71ae117 100644 --- a/deps/rabbitmq_stream_common/Makefile +++ b/deps/rabbitmq_stream_common/Makefile @@ -7,13 +7,12 @@ define PROJECT_ENV endef -DEPS = +DEPS = osiris TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers -PLT_APPS = osiris - DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/rabbitmq_trust_store/Makefile b/deps/rabbitmq_trust_store/Makefile index 77440b74080d..58b73990da58 100644 --- a/deps/rabbitmq_trust_store/Makefile +++ b/deps/rabbitmq_trust_store/Makefile @@ -10,7 +10,7 @@ define PROJECT_ENV endef DEPS = rabbit_common rabbit -LOCAL_DEPS += ssl crypto public_key inets +LOCAL_DEPS = ssl crypto public_key inets ## We need the Cowboy's test utilities TEST_DEPS = rabbitmq_ct_helpers rabbitmq_ct_client_helpers amqp_client ct_helper trust_store_http dep_ct_helper = git https://github.com/extend/ct_helper.git master diff --git a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl index 5e0aee535451..a5f0e59dbaf8 100644 --- a/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl +++ b/deps/rabbitmq_trust_store/src/rabbit_trust_store_http_provider.erl @@ -65,8 +65,8 @@ join_url(BaseUrl, CertPath) -> string:strip(rabbit_data_coercion:to_list(CertPath), left, $/). init(Config) -> - inets:start(httpc, [{profile, ?PROFILE}]), - _ = application:ensure_all_started(ssl), + _ = inets:start(httpc, [{profile, ?PROFILE}]), + {ok, _} = application:ensure_all_started(ssl), Options = proplists:get_value(proxy_options, Config, []), httpc:set_options(Options, ?PROFILE). 
diff --git a/deps/rabbitmq_web_mqtt/BUILD.bazel b/deps/rabbitmq_web_mqtt/BUILD.bazel index f9561e14ffaf..49b62e9f1aa8 100644 --- a/deps/rabbitmq_web_mqtt/BUILD.bazel +++ b/deps/rabbitmq_web_mqtt/BUILD.bazel @@ -103,11 +103,11 @@ eunit( broker_for_integration_suites() rabbitmq_integration_suite( - name = "config_schema_SUITE", + name = "web_mqtt_config_schema_SUITE", ) rabbitmq_integration_suite( - name = "command_SUITE", + name = "web_mqtt_command_SUITE", additional_beam = [ "test/rabbit_web_mqtt_test_util.beam", ], @@ -117,7 +117,7 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "proxy_protocol_SUITE", + name = "web_mqtt_proxy_protocol_SUITE", additional_beam = [ "test/src/rabbit_ws_test_util.beam", "test/src/rfc6455_client.beam", @@ -125,7 +125,23 @@ rabbitmq_integration_suite( ) rabbitmq_integration_suite( - name = "system_SUITE", + name = "web_mqtt_shared_SUITE", + additional_beam = [ + "test/src/rabbit_ws_test_util.beam", + "test/src/rfc6455_client.beam", + ], +) + +rabbitmq_integration_suite( + name = "web_mqtt_system_SUITE", + additional_beam = [ + "test/src/rabbit_ws_test_util.beam", + "test/src/rfc6455_client.beam", + ], +) + +rabbitmq_integration_suite( + name = "web_mqtt_v5_SUITE", additional_beam = [ "test/src/rabbit_ws_test_util.beam", "test/src/rfc6455_client.beam", diff --git a/deps/rabbitmq_web_mqtt/Makefile b/deps/rabbitmq_web_mqtt/Makefile index 9919e7cb82cd..812d467f1911 100644 --- a/deps/rabbitmq_web_mqtt/Makefile +++ b/deps/rabbitmq_web_mqtt/Makefile @@ -19,7 +19,7 @@ export BUILD_WITHOUT_QUIC LOCAL_DEPS = ssl DEPS = rabbit_common rabbit cowboy rabbitmq_mqtt -TEST_DEPS = emqtt rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management +TEST_DEPS = emqtt rabbitmq_ct_helpers rabbitmq_ct_client_helpers rabbitmq_management rabbitmq_stomp rabbitmq_consistent_hash_exchange PLT_APPS += rabbitmqctl elixir cowlib @@ -34,3 +34,9 @@ DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk include ../../rabbitmq-components.mk include 
../../erlang.mk + +# We are using mqtt_shared_SUITE from rabbitmq_mqtt. +CT_OPTS += -pa ../rabbitmq_mqtt/test/ + +test-build:: + $(verbose) $(MAKE) -C ../rabbitmq_mqtt test-dir diff --git a/deps/rabbitmq_web_mqtt/test/command_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl similarity index 93% rename from deps/rabbitmq_web_mqtt/test/command_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl index c526d8c4f217..04d50f7fb582 100644 --- a/deps/rabbitmq_web_mqtt/test/command_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_command_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. --module(command_SUITE). +-module(web_mqtt_command_SUITE). -compile([export_all, nowarn_export_all]). -include_lib("eunit/include/eunit.hrl"). @@ -16,6 +16,7 @@ [connect/3, connect/4]). -define(COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListWebMqttConnectionsCommand'). +-define(MQTT_COMMAND, 'Elixir.RabbitMQ.CLI.Ctl.Commands.ListMqttConnectionsCommand'). all() -> [ @@ -93,12 +94,16 @@ run(BaseConfig) -> [] = 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), %% Open a WebMQTT connection - C2 = connect(<<"simpleWebMqttClient">>, Config, [{ack_timeout, 1}]), timer:sleep(200), + %% WebMQTT CLI should list only WebMQTT connection. [[{client_id, <<"simpleWebMqttClient">>}]] = - 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + 'Elixir.Enum':to_list(?COMMAND:run([<<"client_id">>], Opts)), + + %% MQTT CLI should list only MQTT connection. 
+ [[{client_id, <<"simpleMqttClient">>}]] = + 'Elixir.Enum':to_list(?MQTT_COMMAND:run([<<"client_id">>], Opts)), C3 = connect(<<"simpleWebMqttClient1">>, Config, [{ack_timeout, 1}]), timer:sleep(200), diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl similarity index 97% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl index 694d7ea5a25a..7b280eccfc1b 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(config_schema_SUITE). +-module(web_mqtt_config_schema_SUITE). -compile(export_all). diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cacert.pem rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cert.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/cert.pem rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/cert.pem diff --git a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/key.pem similarity index 100% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/certs/key.pem rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/certs/key.pem diff --git 
a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets similarity index 80% rename from deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets rename to deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets index ab6735cbc830..4d592eee3124 100644 --- a/deps/rabbitmq_web_mqtt/test/config_schema_SUITE_data/rabbitmq_web_mqtt.snippets +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_config_schema_SUITE_data/rabbitmq_web_mqtt.snippets @@ -73,18 +73,18 @@ {ssl_with_listener, "web_mqtt.ssl.listener = 127.0.0.2:15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem - web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme", [{rabbitmq_web_mqtt, [{ssl_config, [{ip,"127.0.0.2"}, {port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, {password,<<"changeme">>}]}]}], [rabbitmq_web_mqtt]}, @@ -92,9 +92,9 @@ "web_mqtt.ssl.ip = 127.0.0.2 web_mqtt.ssl.port = 15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem - 
web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme web_mqtt.ssl.versions.tls1_2 = tlsv1.2 @@ -105,9 +105,9 @@ {ip,"127.0.0.2"}, {port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, {password,<<"changeme">>}, {versions,['tlsv1.2','tlsv1.1']} @@ -117,9 +117,9 @@ {ssl_ciphers, "web_mqtt.ssl.port = 15671 web_mqtt.ssl.backlog = 1024 - web_mqtt.ssl.certfile = test/config_schema_SUITE_data/certs/cert.pem - web_mqtt.ssl.keyfile = test/config_schema_SUITE_data/certs/key.pem - web_mqtt.ssl.cacertfile = test/config_schema_SUITE_data/certs/cacert.pem + web_mqtt.ssl.certfile = test/web_mqtt_config_schema_SUITE_data/certs/cert.pem + web_mqtt.ssl.keyfile = test/web_mqtt_config_schema_SUITE_data/certs/key.pem + web_mqtt.ssl.cacertfile = test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem web_mqtt.ssl.password = changeme web_mqtt.ssl.honor_cipher_order = true @@ -142,9 +142,9 @@ [{ssl_config, [{port,15671}, {backlog,1024}, - {certfile,"test/config_schema_SUITE_data/certs/cert.pem"}, - {keyfile,"test/config_schema_SUITE_data/certs/key.pem"}, - {cacertfile,"test/config_schema_SUITE_data/certs/cacert.pem"}, + {certfile,"test/web_mqtt_config_schema_SUITE_data/certs/cert.pem"}, + {keyfile,"test/web_mqtt_config_schema_SUITE_data/certs/key.pem"}, + {cacertfile,"test/web_mqtt_config_schema_SUITE_data/certs/cacert.pem"}, 
{password,<<"changeme">>}, {honor_cipher_order, true}, diff --git a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl similarity index 99% rename from deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl index d13426342d30..7f9e9adb2f8d 100644 --- a/deps/rabbitmq_web_mqtt/test/proxy_protocol_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_proxy_protocol_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(proxy_protocol_SUITE). +-module(web_mqtt_proxy_protocol_SUITE). -compile([export_all, nowarn_export_all]). diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl new file mode 100644 index 000000000000..f3818b34ee06 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_shared_SUITE.erl @@ -0,0 +1,101 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% This test suite uses test cases shared by rabbitmq_mqtt. +-module(web_mqtt_shared_SUITE). +-compile([export_all, + nowarn_export_all]). + +all() -> + mqtt_shared_SUITE:all(). + +groups() -> + mqtt_shared_SUITE:groups(). + +suite() -> + mqtt_shared_SUITE:suite(). + +init_per_suite(Config) -> + mqtt_shared_SUITE:init_per_suite(Config). + +end_per_suite(Config) -> + mqtt_shared_SUITE:end_per_suite(Config). + +init_per_group(mqtt, Config) -> + %% This is the main difference with rabbitmq_mqtt. 
+ rabbit_ct_helpers:set_config(Config, {websocket, true}); +init_per_group(Group, Config) -> + mqtt_shared_SUITE:init_per_group(Group, Config). + +end_per_group(Group, Config) -> + mqtt_shared_SUITE:end_per_group(Group, Config). + +init_per_testcase(Testcase, Config) -> + mqtt_shared_SUITE:init_per_testcase(Testcase, Config). + +end_per_testcase(Testcase, Config) -> + mqtt_shared_SUITE:end_per_testcase(Testcase, Config). + +global_counters(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +message_size_metrics(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +block_only_publisher(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +many_qos1_messages(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_expiry(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_close_all_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_close_all_user_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +management_plugin_connection(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +management_plugin_enable(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +pubsub_shared_connection(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +pubsub_separate_connections(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +will_with_disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +will_without_disconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +decode_basic_properties(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +quorum_queue_rejects(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +events(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +internal_event_handler(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_reconnect_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_reconnect_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). 
+non_clean_sess_reconnect_qos0_and_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +non_clean_sess_empty_client_id(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_same_topic_same_qos(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_same_topic_different_qos(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +subscribe_multiple(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +large_message_mqtt_to_mqtt(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +large_message_amqp_to_mqtt(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +keepalive(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +keepalive_turned_off(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +block(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +amqp_to_mqtt_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_disconnect_client(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_node_restart(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +clean_session_node_kill(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_status_connection_count(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +trace(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +trace_large_message(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +max_packet_size_unauthenticated(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +max_packet_size_authenticated(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +default_queue_type(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +incoming_message_interceptors(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +utf8(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +retained_message_conversion(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +bind_exchange_to_exchange(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +bind_exchange_to_exchange_single_message(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). 
+pubsub(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +queue_down_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +consuming_classic_queue_down(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +flow_quorum_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +flow_stream(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_mqtt_qos0_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +rabbit_mqtt_qos0_queue_kill_node(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +cli_list_queues(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +delete_create_queue(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_reconnect(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +session_takeover(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +duplicate_client_id(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +publish_to_all_queue_types_qos0(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +publish_to_all_queue_types_qos1(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). +maintenance(Config) -> mqtt_shared_SUITE:?FUNCTION_NAME(Config). diff --git a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl similarity index 99% rename from deps/rabbitmq_web_mqtt/test/system_SUITE.erl rename to deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl index 35af6e923d28..3b01af7f1e06 100644 --- a/deps/rabbitmq_web_mqtt/test/system_SUITE.erl +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_system_SUITE.erl @@ -5,7 +5,7 @@ %% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. %% --module(system_SUITE). +-module(web_mqtt_system_SUITE). -include_lib("eunit/include/eunit.hrl"). 
diff --git a/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl b/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl new file mode 100644 index 000000000000..5012ddd4d0b8 --- /dev/null +++ b/deps/rabbitmq_web_mqtt/test/web_mqtt_v5_SUITE.erl @@ -0,0 +1,114 @@ +%% This Source Code Form is subject to the terms of the Mozilla Public +%% License, v. 2.0. If a copy of the MPL was not distributed with this +%% file, You can obtain one at https://mozilla.org/MPL/2.0/. +%% +%% Copyright (c) 2007-2024 Broadcom. All Rights Reserved. The term “Broadcom” refers to Broadcom Inc. and/or its subsidiaries. All rights reserved. + +%% This test suite uses test cases shared by rabbitmq_mqtt. +-module(web_mqtt_v5_SUITE). +-compile([export_all, + nowarn_export_all]). + +all() -> + v5_SUITE:all(). + +groups() -> + v5_SUITE:groups(). + +suite() -> + v5_SUITE:suite(). + +init_per_suite(Config) -> + v5_SUITE:init_per_suite(Config). + +end_per_suite(Config) -> + v5_SUITE:end_per_suite(Config). + +init_per_group(mqtt, Config) -> + %% This is the main difference with rabbitmq_mqtt. + rabbit_ct_helpers:set_config(Config, {websocket, true}); +init_per_group(Group, Config) -> + v5_SUITE:init_per_group(Group, Config). + +end_per_group(Group, Config) -> + v5_SUITE:end_per_group(Group, Config). + +init_per_testcase(Testcase, Config) -> + v5_SUITE:init_per_testcase(Testcase, Config). + +end_per_testcase(Testcase, Config) -> + v5_SUITE:end_per_testcase(Testcase, Config). + +client_set_max_packet_size_publish(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_set_max_packet_size_connack(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_set_max_packet_size_invalid(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry_will_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +message_expiry_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+session_expiry_classic_queue_disconnect_decrease(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_quorum_queue_disconnect_decrease(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_zero_to_non_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_non_zero_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_infinity_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_disconnect_to_infinity(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_non_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_expiry_reconnect_infinity_to_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_publish_qos2(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_rejects_publish(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_receive_maximum_min(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +client_receive_maximum_large(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +unsubscribe_success(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +unsubscribe_topic_not_found(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_no_local(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_no_local_wildcards(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_as_published(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_as_published_wildcards(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_option_retain_handling(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier_amqp091(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_identifier_at_most_once_dead_letter(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +at_most_once_dead_letter_detect_cycle(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+subscription_options_persisted(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify_qos1(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +subscription_options_modify_qos0(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_qos1(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_qos0(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_amqp091_pub(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +compatibility_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v3_v5_unsubscribe(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_upgrade_v4_v5_no_queue_bind_permission(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +amqp091_cc_header(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_content_type(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_payload_format_indicator(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_response_topic_correlation_data(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +publish_property_user_property(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +disconnect_with_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_qos2(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_greater_than_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_less_than_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_equals_session_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_session_expiry_zero(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_reconnect_no_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_reconnect_with_will(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_session_takeover(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_message_expiry(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_message_expiry_publish_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). 
+will_delay_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +retain_properties(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_client_to_server(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_server_to_client(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_bidirectional(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_invalid(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_unknown(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_disallowed(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +topic_alias_disallowed_retained_message(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +extended_auth(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +headers_exchange(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +consistent_hash_exchange(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_migrate_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +session_takeover_v3_v5(Config) -> v5_SUITE:?FUNCTION_NAME(Config). +will_delay_node_restart(Config) -> v5_SUITE:?FUNCTION_NAME(Config). diff --git a/deps/trust_store_http/Makefile b/deps/trust_store_http/Makefile index 341d187df719..fa7c17d9ac6e 100644 --- a/deps/trust_store_http/Makefile +++ b/deps/trust_store_http/Makefile @@ -10,7 +10,8 @@ LOCAL_DEPS = ssl DEPS = cowboy thoas DEP_EARLY_PLUGINS = rabbit_common/mk/rabbitmq-early-plugin.mk -DEP_PLUGINS = rabbit_common/mk/rabbitmq-plugin.mk +# We do not depend on rabbit therefore can't run the broker. 
+DEP_PLUGINS = rabbit_common/mk/rabbitmq-build.mk include ../../rabbitmq-components.mk include ../../erlang.mk diff --git a/deps/trust_store_http/src/trust_store_http.erl b/deps/trust_store_http/src/trust_store_http.erl index d32b647b547c..315196f8b042 100644 --- a/deps/trust_store_http/src/trust_store_http.erl +++ b/deps/trust_store_http/src/trust_store_http.erl @@ -5,8 +5,8 @@ main([]) -> io:format("~nStarting trust store server ~n", []), - application:ensure_all_started(trust_store_http), + {ok, _} = application:ensure_all_started(trust_store_http), io:format("~nTrust store server started on port ~tp ~n", [application:get_env(trust_store_http, port, undefined)]), user_drv:start(), - timer:sleep(infinity). \ No newline at end of file + timer:sleep(infinity). diff --git a/deps/trust_store_http/src/trust_store_http_app.erl b/deps/trust_store_http/src/trust_store_http_app.erl index 2fd861405a51..84a2b7e83d0a 100644 --- a/deps/trust_store_http/src/trust_store_http_app.erl +++ b/deps/trust_store_http/src/trust_store_http_app.erl @@ -15,7 +15,7 @@ start(_Type, _Args) -> {"/certs/[...]", cowboy_static, {dir, Directory, [{mimetypes, {<<"text">>, <<"html">>, []}}]}}]} ]), - case get_ssl_options() of + _ = case get_ssl_options() of undefined -> start_http(Dispatch, Port); SslOptions -> start_https(Dispatch, Port, SslOptions) end, diff --git a/deps/trust_store_http/src/trust_store_list_handler.erl b/deps/trust_store_http/src/trust_store_list_handler.erl index a09bf0306cfe..416dfc253d99 100644 --- a/deps/trust_store_http/src/trust_store_list_handler.erl +++ b/deps/trust_store_http/src/trust_store_list_handler.erl @@ -25,7 +25,7 @@ respond(Files, Req, State) -> respond_error(Reason, Req, State) -> Error = io_lib:format("Error listing certificates ~tp", [Reason]), logger:log(error, "~ts", [Error]), - Req2 = cowboy_req:reply(500, [], iolist_to_binary(Error), Req), + Req2 = cowboy_req:reply(500, #{}, iolist_to_binary(Error), Req), {ok, Req2, State}. 
json_encode(Files) -> @@ -40,7 +40,6 @@ cert_id(FileName, FileDate, FileHash) -> cert_path(FileName) -> iolist_to_binary(["/certs/", FileName]). --spec list_files(string()) -> [{string(), file:date_time(), integer()}]. list_files(Directory) -> case file:list_dir(Directory) of {ok, FileNames} -> diff --git a/moduleindex.yaml b/moduleindex.yaml index 24eaed75577c..c0809c1f9156 100755 --- a/moduleindex.yaml +++ b/moduleindex.yaml @@ -863,6 +863,9 @@ rabbitmq_consistent_hash_exchange: rabbitmq_ct_client_helpers: - rabbit_ct_client_helpers rabbitmq_ct_helpers: +- ct_master_event_fork +- ct_master_fork +- ct_master_logs_fork - cth_log_redirect_any_domains - rabbit_control_helper - rabbit_ct_broker_helpers diff --git a/rabbitmq-components.mk b/rabbitmq-components.mk index 10d9e7d108c8..8dfdaed0664a 100644 --- a/rabbitmq-components.mk +++ b/rabbitmq-components.mk @@ -6,108 +6,34 @@ ifeq ($(.DEFAULT_GOAL),) endif # PROJECT_VERSION defaults to: -# 1. the version exported by rabbitmq-server-release; +# 1. the version exported by environment; # 2. the version stored in `git-revisions.txt`, if it exists; # 3. a version based on git-describe(1), if it is a Git clone; # 4. 0.0.0 +# +# Note that in the case where git-describe(1) is used +# (e.g. during development), running "git gc" may help +# improve the performance. 
PROJECT_VERSION := $(RABBITMQ_VERSION) ifeq ($(PROJECT_VERSION),) +ifneq ($(wildcard git-revisions.txt),) PROJECT_VERSION = $(shell \ -if test -f git-revisions.txt; then \ head -n1 git-revisions.txt | \ - awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}'; \ -else \ + awk '{print $$$(words $(PROJECT_DESCRIPTION) version);}') +else +PROJECT_VERSION = $(shell \ (git describe --dirty --abbrev=7 --tags --always --first-parent \ - 2>/dev/null || echo rabbitmq_v0_0_0) | \ - sed -e 's/^rabbitmq_v//' -e 's/^v//' -e 's/_/./g' -e 's/-/+/' \ - -e 's/-/./g'; \ -fi) + 2>/dev/null || echo 0.0.0) | \ + sed -e 's/^v//' -e 's/_/./g' -e 's/-/+/' -e 's/-/./g') +endif endif # -------------------------------------------------------------------- # RabbitMQ components. # -------------------------------------------------------------------- -# For RabbitMQ repositories, we want to checkout branches which match -# the parent project. For instance, if the parent project is on a -# release tag, dependencies must be on the same release tag. If the -# parent project is on a topic branch, dependencies must be on the same -# topic branch or fallback to `stable` or `main` whichever was the -# base of the topic branch. 
- -dep_amqp_client = git_rmq-subfolder rabbitmq-erlang-client $(current_rmq_ref) $(base_rmq_ref) main -dep_amqp10_client = git_rmq-subfolder rabbitmq-amqp1.0-client $(current_rmq_ref) $(base_rmq_ref) main -dep_oauth2_client = git_rmq-subfolder oauth2-client $(current_rmq_ref) $(base_rmq_ref) main -dep_amqp10_common = git_rmq-subfolder rabbitmq-amqp1.0-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbit = git_rmq-subfolder rabbitmq-server $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbit_common = git_rmq-subfolder rabbitmq-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_amqp1_0 = git_rmq-subfolder rabbitmq-amqp1.0 $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_amqp = git_rmq rabbitmq-auth-backend-amqp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_cache = git_rmq-subfolder rabbitmq-auth-backend-cache $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_http = git_rmq-subfolder rabbitmq-auth-backend-http $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_ldap = git_rmq-subfolder rabbitmq-auth-backend-ldap $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_backend_oauth2 = git_rmq-subfolder rabbitmq-auth-backend-oauth2 $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_auth_mechanism_ssl = git_rmq-subfolder rabbitmq-auth-mechanism-ssl $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_aws = git_rmq-subfolder rabbitmq-aws $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_boot_steps_visualiser = git_rmq rabbitmq-boot-steps-visualiser $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_cli = git_rmq-subfolder rabbitmq-cli $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_codegen = git_rmq-subfolder rabbitmq-codegen $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_consistent_hash_exchange = git_rmq-subfolder rabbitmq-consistent-hash-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_ct_client_helpers = git_rmq-subfolder 
rabbitmq-ct-client-helpers $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_ct_helpers = git_rmq-subfolder rabbitmq-ct-helpers $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_delayed_message_exchange = git_rmq rabbitmq-delayed-message-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_dotnet_client = git_rmq rabbitmq-dotnet-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_event_exchange = git_rmq-subfolder rabbitmq-event-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_federation = git_rmq-subfolder rabbitmq-federation $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_federation_management = git_rmq-subfolder rabbitmq-federation-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_federation_prometheus = git_rmq-subfolder rabbitmq-federation-prometheus $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_java_client = git_rmq rabbitmq-java-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_client = git_rmq rabbitmq-jms-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_cts = git_rmq rabbitmq-jms-cts $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_jms_topic_exchange = git_rmq-subfolder rabbitmq-jms-topic-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_lvc_exchange = git_rmq rabbitmq-lvc-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management = git_rmq-subfolder rabbitmq-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_agent = git_rmq-subfolder rabbitmq-management-agent $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_exchange = git_rmq rabbitmq-management-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_management_themes = git_rmq rabbitmq-management-themes $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_message_timestamp = git_rmq rabbitmq-message-timestamp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_metronome = git_rmq rabbitmq-metronome $(current_rmq_ref) 
$(base_rmq_ref) main -dep_rabbitmq_mqtt = git_rmq-subfolder rabbitmq-mqtt $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_objc_client = git_rmq rabbitmq-objc-client $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_aws = git_rmq-subfolder rabbitmq-peer-discovery-aws $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_common = git_rmq-subfolder rabbitmq-peer-discovery-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_consul = git_rmq-subfolder rabbitmq-peer-discovery-consul $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_etcd = git_rmq-subfolder rabbitmq-peer-discovery-etcd $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_peer_discovery_k8s = git_rmq-subfolder rabbitmq-peer-discovery-k8s $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_prelaunch = git_rmq-subfolder rabbitmq-prelaunch $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_prometheus = git_rmq-subfolder rabbitmq-prometheus $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_random_exchange = git_rmq-subfolder rabbitmq-random-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_recent_history_exchange = git_rmq-subfolder rabbitmq-recent-history-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_routing_node_stamp = git_rmq rabbitmq-routing-node-stamp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_rtopic_exchange = git_rmq rabbitmq-rtopic-exchange $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_sharding = git_rmq-subfolder rabbitmq-sharding $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_shovel = git_rmq-subfolder rabbitmq-shovel $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_shovel_management = git_rmq-subfolder rabbitmq-shovel-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_shovel_prometheus = git_rmq-subfolder rabbitmq-shovel-prometheus $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stomp = git_rmq-subfolder rabbitmq-stomp 
$(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stream = git_rmq-subfolder rabbitmq-stream $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stream_common = git_rmq-subfolder rabbitmq-stream-common $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_stream_management = git_rmq-subfolder rabbitmq-stream-management $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_toke = git_rmq rabbitmq-toke $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_top = git_rmq-subfolder rabbitmq-top $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_tracing = git_rmq-subfolder rabbitmq-tracing $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_trust_store = git_rmq-subfolder rabbitmq-trust-store $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_test = git_rmq rabbitmq-test $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_dispatch = git_rmq-subfolder rabbitmq-web-dispatch $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_stomp = git_rmq-subfolder rabbitmq-web-stomp $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_stomp_examples = git_rmq-subfolder rabbitmq-web-stomp-examples $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_mqtt = git_rmq-subfolder rabbitmq-web-mqtt $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_web_mqtt_examples = git_rmq-subfolder rabbitmq-web-mqtt-examples $(current_rmq_ref) $(base_rmq_ref) main -dep_rabbitmq_website = git_rmq rabbitmq-website $(current_rmq_ref) $(base_rmq_ref) live main -dep_toke = git_rmq toke $(current_rmq_ref) $(base_rmq_ref) master - # Third-party dependencies version pinning. 
# # We do that in this file, which is included by all projects, to ensure @@ -128,80 +54,102 @@ dep_ra = hex 2.14.0 dep_ranch = hex 2.1.0 dep_recon = hex 2.5.6 dep_redbug = hex 2.1.0 +dep_systemd = hex 0.6.1 dep_thoas = hex 1.2.1 dep_observer_cli = hex 1.7.5 dep_seshat = git https://github.com/rabbitmq/seshat v0.6.1 dep_stdout_formatter = hex 0.2.4 dep_sysmon_handler = hex 1.3.0 -RABBITMQ_COMPONENTS = amqp_client \ - amqp10_common \ - amqp10_client \ - oauth2_client \ - rabbit \ - rabbit_common \ - rabbitmq_amqp1_0 \ - rabbitmq_auth_backend_amqp \ - rabbitmq_auth_backend_cache \ - rabbitmq_auth_backend_http \ - rabbitmq_auth_backend_ldap \ - rabbitmq_auth_backend_oauth2 \ - rabbitmq_auth_mechanism_ssl \ - rabbitmq_aws \ - rabbitmq_boot_steps_visualiser \ - rabbitmq_cli \ - rabbitmq_codegen \ - rabbitmq_consistent_hash_exchange \ - rabbitmq_ct_client_helpers \ - rabbitmq_ct_helpers \ - rabbitmq_delayed_message_exchange \ - rabbitmq_dotnet_client \ - rabbitmq_event_exchange \ - rabbitmq_federation \ - rabbitmq_federation_management \ - rabbitmq_federation_prometheus \ - rabbitmq_java_client \ - rabbitmq_jms_client \ - rabbitmq_jms_cts \ - rabbitmq_jms_topic_exchange \ - rabbitmq_lvc_exchange \ - rabbitmq_management \ - rabbitmq_management_agent \ - rabbitmq_management_exchange \ - rabbitmq_management_themes \ - rabbitmq_message_timestamp \ - rabbitmq_metronome \ - rabbitmq_mqtt \ - rabbitmq_objc_client \ - rabbitmq_peer_discovery_aws \ - rabbitmq_peer_discovery_common \ - rabbitmq_peer_discovery_consul \ - rabbitmq_peer_discovery_etcd \ - rabbitmq_peer_discovery_k8s \ - rabbitmq_prometheus \ - rabbitmq_random_exchange \ - rabbitmq_recent_history_exchange \ - rabbitmq_routing_node_stamp \ - rabbitmq_rtopic_exchange \ - rabbitmq_server_release \ - rabbitmq_sharding \ - rabbitmq_shovel \ - rabbitmq_shovel_management \ - rabbitmq_shovel_prometheus \ - rabbitmq_stomp \ - rabbitmq_stream \ - rabbitmq_stream_common \ - rabbitmq_stream_management \ - rabbitmq_toke \ - 
rabbitmq_top \ - rabbitmq_tracing \ - rabbitmq_trust_store \ - rabbitmq_web_dispatch \ - rabbitmq_web_mqtt \ - rabbitmq_web_mqtt_examples \ - rabbitmq_web_stomp \ - rabbitmq_web_stomp_examples \ - rabbitmq_website +# RabbitMQ applications found in the monorepo. +# +# Note that rabbitmq_server_release is not a real application +# but is the name used in the top-level Makefile. + +RABBITMQ_BUILTIN = \ + amqp10_client \ + amqp10_common \ + amqp_client \ + oauth2_client \ + rabbit \ + rabbit_common \ + rabbitmq_amqp1_0 \ + rabbitmq_amqp_client \ + rabbitmq_auth_backend_cache \ + rabbitmq_auth_backend_http \ + rabbitmq_auth_backend_ldap \ + rabbitmq_auth_backend_oauth2 \ + rabbitmq_auth_mechanism_ssl \ + rabbitmq_aws \ + rabbitmq_cli \ + rabbitmq_codegen \ + rabbitmq_consistent_hash_exchange \ + rabbitmq_ct_client_helpers \ + rabbitmq_ct_helpers \ + rabbitmq_event_exchange \ + rabbitmq_federation \ + rabbitmq_federation_management \ + rabbitmq_federation_prometheus \ + rabbitmq_jms_topic_exchange \ + rabbitmq_management \ + rabbitmq_management_agent \ + rabbitmq_mqtt \ + rabbitmq_peer_discovery_aws \ + rabbitmq_peer_discovery_common \ + rabbitmq_peer_discovery_consul \ + rabbitmq_peer_discovery_etcd \ + rabbitmq_peer_discovery_k8s \ + rabbitmq_prelaunch \ + rabbitmq_prometheus \ + rabbitmq_random_exchange \ + rabbitmq_recent_history_exchange \ + rabbitmq_server_release \ + rabbitmq_sharding \ + rabbitmq_shovel \ + rabbitmq_shovel_management \ + rabbitmq_stomp \ + rabbitmq_stream \ + rabbitmq_stream_common \ + rabbitmq_stream_management \ + rabbitmq_top \ + rabbitmq_tracing \ + rabbitmq_trust_store \ + rabbitmq_web_dispatch \ + rabbitmq_web_mqtt \ + rabbitmq_web_mqtt_examples \ + rabbitmq_web_stomp \ + rabbitmq_web_stomp_examples \ + trust_store_http + +# Applications outside of the monorepo maintained by Team RabbitMQ. 
+ +RABBITMQ_COMMUNITY = \ + rabbitmq_auth_backend_amqp \ + rabbitmq_boot_steps_visualiser \ + rabbitmq_delayed_message_exchange \ + rabbitmq_lvc_exchange \ + rabbitmq_management_exchange \ + rabbitmq_management_themes \ + rabbitmq_message_timestamp \ + rabbitmq_metronome \ + rabbitmq_routing_node_stamp \ + rabbitmq_rtopic_exchange + +community_dep = git git@github.com:rabbitmq/$1.git $(if $2,$2,main) +dep_rabbitmq_auth_backend_amqp = $(call community_dep,rabbitmq-auth-backend-amqp) +dep_rabbitmq_boot_steps_visualiser = $(call community_dep,rabbitmq-boot-steps-visualiser,master) +dep_rabbitmq_delayed_message_exchange = $(call community_dep,rabbitmq-delayed-message-exchange) +dep_rabbitmq_lvc_exchange = $(call community_dep,rabbitmq-lvc-exchange) +dep_rabbitmq_management_exchange = $(call community_dep,rabbitmq-management-exchange) +dep_rabbitmq_management_themes = $(call community_dep,rabbitmq-management-themes,master) +dep_rabbitmq_message_timestamp = $(call community_dep,rabbitmq-message-timestamp) +dep_rabbitmq_metronome = $(call community_dep,rabbitmq-metronome,master) +dep_rabbitmq_routing_node_stamp = $(call community_dep,rabbitmq-routing-node-stamp) +dep_rabbitmq_rtopic_exchange = $(call community_dep,rabbitmq-rtopic-exchange) + +# All RabbitMQ applications. + +RABBITMQ_COMPONENTS = $(RABBITMQ_BUILTIN) $(RABBITMQ_COMMUNITY) # Erlang.mk does not rebuild dependencies by default, once they were # compiled once, except for those listed in the `$(FORCE_REBUILD)` @@ -212,137 +160,10 @@ RABBITMQ_COMPONENTS = amqp_client \ FORCE_REBUILD = $(RABBITMQ_COMPONENTS) -# Several components have a custom erlang.mk/build.config, mainly -# to disable eunit. Therefore, we can't use the top-level project's -# erlang.mk copy. 
-NO_AUTOPATCH += $(RABBITMQ_COMPONENTS) - -ifeq ($(origin current_rmq_ref),undefined) -ifneq ($(wildcard .git),) -current_rmq_ref := $(shell (\ - ref=$$(LANG=C git branch --list | awk '/^\* \(.*detached / {ref=$$0; sub(/.*detached [^ ]+ /, "", ref); sub(/\)$$/, "", ref); print ref; exit;} /^\* / {ref=$$0; sub(/^\* /, "", ref); print ref; exit}');\ - if test "$$(git rev-parse --short HEAD)" != "$$ref"; then echo "$$ref"; fi)) -else -current_rmq_ref := main -endif -endif -export current_rmq_ref - -ifeq ($(origin base_rmq_ref),undefined) -ifneq ($(wildcard .git),) -possible_base_rmq_ref := main -ifeq ($(possible_base_rmq_ref),$(current_rmq_ref)) -base_rmq_ref := $(current_rmq_ref) -else -base_rmq_ref := $(shell \ - (git rev-parse --verify -q main >/dev/null && \ - git rev-parse --verify -q $(possible_base_rmq_ref) >/dev/null && \ - git merge-base --is-ancestor $$(git merge-base main HEAD) $(possible_base_rmq_ref) && \ - echo $(possible_base_rmq_ref)) || \ - echo main) -endif -else -base_rmq_ref := main -endif -endif -export base_rmq_ref - -# Repository URL selection. -# -# First, we infer other components' location from the current project -# repository URL, if it's a Git repository: -# - We take the "origin" remote URL as the base -# - The current project name and repository name is replaced by the -# target's properties: -# eg. rabbitmq-common is replaced by rabbitmq-codegen -# eg. rabbit_common is replaced by rabbitmq_codegen -# -# If cloning from this computed location fails, we fallback to RabbitMQ -# upstream which is GitHub. - -# Macro to transform eg. "rabbit_common" to "rabbitmq-common". -rmq_cmp_repo_name = $(word 2,$(dep_$(1))) - -# Upstream URL for the current project. 
-RABBITMQ_COMPONENT_REPO_NAME := $(call rmq_cmp_repo_name,$(PROJECT)) -RABBITMQ_UPSTREAM_FETCH_URL ?= https://github.com/rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git -RABBITMQ_UPSTREAM_PUSH_URL ?= git@github.com:rabbitmq/$(RABBITMQ_COMPONENT_REPO_NAME).git - -# Current URL for the current project. If this is not a Git clone, -# default to the upstream Git repository. -ifneq ($(wildcard .git),) -git_origin_fetch_url := $(shell git config remote.origin.url) -git_origin_push_url := $(shell git config remote.origin.pushurl || git config remote.origin.url) -RABBITMQ_CURRENT_FETCH_URL ?= $(git_origin_fetch_url) -RABBITMQ_CURRENT_PUSH_URL ?= $(git_origin_push_url) -else -RABBITMQ_CURRENT_FETCH_URL ?= $(RABBITMQ_UPSTREAM_FETCH_URL) -RABBITMQ_CURRENT_PUSH_URL ?= $(RABBITMQ_UPSTREAM_PUSH_URL) -endif - -# Macro to replace the following pattern: -# 1. /foo.git -> /bar.git -# 2. /foo -> /bar -# 3. /foo/ -> /bar/ -subst_repo_name = $(patsubst %/$(1)/%,%/$(2)/%,$(patsubst %/$(1),%/$(2),$(patsubst %/$(1).git,%/$(2).git,$(3)))) - -# Macro to replace both the project's name (eg. "rabbit_common") and -# repository name (eg. "rabbitmq-common") by the target's equivalent. -# -# This macro is kept on one line because we don't want whitespaces in -# the returned value, as it's used in $(dep_fetch_git_rmq) in a shell -# single-quoted string. 
-dep_rmq_repo = $(if $(dep_$(2)),$(call subst_repo_name,$(PROJECT),$(2),$(call subst_repo_name,$(RABBITMQ_COMPONENT_REPO_NAME),$(call rmq_cmp_repo_name,$(2)),$(1))),$(pkg_$(1)_repo)) - -dep_rmq_commits = $(if $(dep_$(1)), \ - $(wordlist 3,$(words $(dep_$(1))),$(dep_$(1))), \ - $(pkg_$(1)_commit)) - -define dep_fetch_git_rmq - fetch_url1='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_FETCH_URL),$(1))'; \ - fetch_url2='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_FETCH_URL),$(1))'; \ - if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \ - git clone -q -n -- "$$$$fetch_url1" $(DEPS_DIR)/$(call dep_name,$(1)); then \ - fetch_url="$$$$fetch_url1"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),$(1))'; \ - elif git clone -q -n -- "$$$$fetch_url2" $(DEPS_DIR)/$(call dep_name,$(1)); then \ - fetch_url="$$$$fetch_url2"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),$(1))'; \ - fi; \ - cd $(DEPS_DIR)/$(call dep_name,$(1)) && ( \ - $(foreach ref,$(call dep_rmq_commits,$(1)), \ - git checkout -q $(ref) >/dev/null 2>&1 || \ - ) \ - (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \ - 1>&2 && false) ) && \ - (test "$$$$fetch_url" = "$$$$push_url" || \ - git remote set-url --push origin "$$$$push_url") -endef - -define dep_fetch_git_rmq-subfolder - fetch_url1='https://github.com/rabbitmq/rabbitmq-server.git'; \ - fetch_url2='git@github.com:rabbitmq/rabbitmq-server.git'; \ - if [ ! 
-d $(ERLANG_MK_TMP)/rabbitmq-server ]; then \ - if test "$$$$fetch_url1" != '$(RABBITMQ_CURRENT_FETCH_URL)' && \ - git clone -q -n -- "$$$$fetch_url1" $(ERLANG_MK_TMP)/rabbitmq-server; then \ - fetch_url="$$$$fetch_url1"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_CURRENT_PUSH_URL),rabbitmq-server)'; \ - elif git clone -q -n -- "$$$$fetch_url2" $(ERLANG_MK_TMP)/rabbitmq-server; then \ - fetch_url="$$$$fetch_url2"; \ - push_url='$(call dep_rmq_repo,$(RABBITMQ_UPSTREAM_PUSH_URL),rabbitmq-server)'; \ - fi; \ - fi; \ - cd $(ERLANG_MK_TMP)/rabbitmq-server && ( \ - $(foreach ref,$(call dep_rmq_commits,$(1)), \ - git checkout -q $(ref) >/dev/null 2>&1 || \ - ) \ - (echo "error: no valid pathspec among: $(call dep_rmq_commits,$(1))" \ - 1>&2 && false) ) && \ - (test "$$$$fetch_url" = "$$$$push_url" || \ - git remote set-url --push origin "$$$$push_url") - ln -s $(ERLANG_MK_TMP)/rabbitmq-server/deps/$(call dep_name,$(1)) \ - $(DEPS_DIR)/$(call dep_name,$(1)); -endef +# We disable autopatching for community plugins as they sit in +# their own repository and we want to avoid polluting the git +# status with changes that should not be committed. +NO_AUTOPATCH += $(RABBITMQ_COMMUNITY) # -------------------------------------------------------------------- # Component distribution. @@ -355,7 +176,7 @@ prepare-dist:: @: # -------------------------------------------------------------------- -# Monorepo-specific settings. +# RabbitMQ-specific settings. 
# -------------------------------------------------------------------- # If the top-level project is a RabbitMQ component, we override diff --git a/rabbitmq.bzl b/rabbitmq.bzl index 56d2bfa22484..d0a5b52405fc 100644 --- a/rabbitmq.bzl +++ b/rabbitmq.bzl @@ -191,6 +191,7 @@ def rabbitmq_suite( "COVERDATA_TO_LCOV_APPS_DIRS": "deps:deps/rabbit/apps", }.items() + test_env.items()), deps = [":test_erlang_app"] + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs ) return name @@ -261,6 +262,7 @@ def rabbitmq_integration_suite( ":rabbitmq-for-tests-run", ] + tools, deps = assumed_deps + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs ) @@ -296,6 +298,7 @@ def rabbitmq_integration_suite( "@rabbitmq-server-generic-unix-3.13//:rabbitmq-run", ] + tools, deps = assumed_deps + deps + runtime_deps, + ct_run_extra_args = ["-kernel net_ticktime 5"], **kwargs )