
Commit fd7221a

Fix build warnings coming from deprecated methods
Signed-off-by: Rafal Lal <rafal.lal@tietoevry.com>
Parent: eab6d05
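
The warnings addressed here come from two Crystal standard-library deprecations: calling `sleep` with a bare number (deprecated in favor of `sleep(Time::Span)`) and `Process::Status#exit_status` (deprecated in favor of `Process::Status#exit_code`). A minimal sketch of both migrations, assuming recent Crystal semantics; the snippet is illustrative and not part of the commit:

    # Illustrative only — not part of this commit.
    # Deprecated: sleep 1.0
    sleep 1.seconds    # Int#seconds returns a Time::Span

    # Deprecated: Process.run(...).exit_status
    status = Process.run("true", shell: true)
    puts status.exit_code    # Int32 exit code of the child process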

File tree

9 files changed: +13 −13 lines

spec/workload/compatibility_spec.cr

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ describe "Compatibility" do
       result = ShellCmd.run_testsuite("cni_compatible verbose")
       until (/PASSED/ =~ result[:output]) || retries > retry_limit
         Log.info { "cni_compatible spec retry: #{retries}" }
-        sleep 1.0
+        sleep 1.seconds
         result = ShellCmd.run_testsuite("cni_compatible verbose")
         retries = retries + 1
       end

spec/workload/configuration_spec.cr

Lines changed: 1 addition & 1 deletion

@@ -88,7 +88,7 @@ describe CnfTestSuite do
       result = ShellCmd.run_testsuite("rolling_downgrade verbose")
       until (/Passed/ =~ result[:output]) || retries > retry_limit
         Log.info { "rolling_downgrade retry: #{retries}" }
-        sleep 1.0
+        sleep 1.seconds
         result = ShellCmd.run_testsuite("rolling_downgrade verbose")
         retries = retries + 1
       end

src/tasks/litmus_setup.cr

Lines changed: 2 additions & 2 deletions

@@ -64,15 +64,15 @@ module LitmusManager
   def self.get_target_node_to_cordon(deployment_label, deployment_value, namespace)
     app_nodeName_cmd = "kubectl get pods -l #{deployment_label}=#{deployment_value} -n #{namespace} -o=jsonpath='{.items[0].spec.nodeName}'"
     Log.info { "Getting the operator node name: #{app_nodeName_cmd}" }
-    status_code = Process.run("#{app_nodeName_cmd}", shell: true, output: appNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_status
+    status_code = Process.run("#{app_nodeName_cmd}", shell: true, output: appNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_code
     Log.for("verbose").info { "status_code: #{status_code}" }
     appNodeName_response.to_s
   end

   private def self.get_status_info(chaos_resource, test_name, output_format, namespace) : {Int32, String}
     status_cmd = "kubectl get #{chaos_resource}.#{LITMUS_K8S_DOMAIN} #{test_name} -n #{namespace} -o '#{output_format}'"
     Log.info { "Getting litmus status info: #{status_cmd}" }
-    status_code = Process.run("#{status_cmd}", shell: true, output: status_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_status
+    status_code = Process.run("#{status_cmd}", shell: true, output: status_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_code
     status_response = status_response.to_s
     Log.info { "status_code: #{status_code}, response: #{status_response}" }
     {status_code, status_response}

src/tasks/utils/k8s_tshark.cr

Lines changed: 1 addition & 1 deletion

@@ -116,7 +116,7 @@ module K8sTshark

       # Some tshark captures were left in zombie states if only kill/kill -9 was invoked.
       ClusterTools.exec_by_node_bg("kill -15 #{@pid}", @node_match.not_nil!)
-      sleep 1
+      sleep 1.seconds
       ClusterTools.exec_by_node_bg("kill -9 #{@pid}", @node_match.not_nil!)

       @pid = nil

src/tasks/utils/timeouts.cr

Lines changed: 1 addition & 1 deletion

@@ -20,7 +20,7 @@ def repeat_with_timeout(timeout, errormsg, reset_on_nil=false, delay=2, &block)
     elsif result
       return true
     end
-    sleep delay
+    sleep delay.seconds
     Log.for("verbose").info { "Time left: #{timeout - (Time.utc - start_time).to_i} seconds" }
   end
   Log.error { errormsg }
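
A note on this hunk: `delay` is a plain number (default `2`), so the fix relies on Crystal's `Int#seconds` / `Float#seconds` conversions to build a `Time::Span`. A tiny illustration (hypothetical, not from the commit):

    delay = 2
    delay.seconds        # => 00:00:02, a Time::Span
    sleep delay.seconds  # same wait as the deprecated `sleep delay`, without the warning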

src/tasks/utils/utils.cr

Lines changed: 1 addition & 1 deletion

@@ -59,7 +59,7 @@ def ensure_kubeconfig!

   # Check if cluster is up and running with assigned KUBECONFIG variable
   cmd = "kubectl get nodes --kubeconfig=#{ENV["KUBECONFIG"]}"
-  exit_code = KubectlClient::ShellCmd.run(cmd, "", false)[:status].exit_status
+  exit_code = KubectlClient::ShellCmd.run(cmd, "", false)[:status].exit_code
   if exit_code != 0
     stdout_failure "Cluster liveness check failed: '#{cmd}' returned exit code #{exit_code}. Check the cluster and/or KUBECONFIG environment variable."
     exit 1

src/tasks/workload/5g_validator.cr

Lines changed: 2 additions & 2 deletions

@@ -115,7 +115,7 @@ end

 def smf_up_heartbeat_capture_matches_count(smf_key : String, smf_value : String, command : String)
   K8sTshark::TsharkPacketCapture.begin_capture_by_label(smf_key, smf_value, command) do |capture|
-    sleep 60
+    sleep 60.seconds
     capture.regex_search(/"pfcp\.msg_type": "(1|2)"/).size
   end
 end
@@ -135,7 +135,7 @@ task "suci_enabled" do |t, args|
   K8sTshark::TsharkPacketCapture.begin_capture_by_label(core_key, core_value, command) do |capture|
     #todo put in prereq
     UERANSIM.install(config)
-    sleep 30.0
+    sleep 30.seconds
     # TODO 5g RAN (only) mobile traffic check ????
     # use suci encyption but don't use a null encryption key
     suci_found = capture.regex_match?(/"nas_5gs.mm.type_id": "1"/) && \

src/tasks/workload/microservice.cr

Lines changed: 2 additions & 2 deletions

@@ -424,7 +424,7 @@ task "zombie_handled" do |t, args|
     end
   end

-  sleep 10.0
+  sleep 10.seconds

   task_response = CNFManager.workload_resource_test(args, config, check_containers:false ) do |resource, container, initialized|
     ClusterTools.all_containers_by_resource?(resource, resource[:namespace], only_container_pids:false) do | container_id, container_pid_on_node, node, container_proctree_statuses, container_status|
@@ -618,7 +618,7 @@ task "sig_term_handled" do |t, args|
   Log.for(t.name).info { "pid_log_names: #{pid_log_names}" }
   #todo 2.3 parse the logs
   #todo get the log
-  sleep 5
+  sleep 5.seconds
   sig_term_found = pid_log_names.map do |pid_name|
     Log.info { "pid_name: #{pid_name}" }
     resp = File.read("#{pid_name}")

src/tasks/workload/state.cr

Lines changed: 2 additions & 2 deletions

@@ -263,13 +263,13 @@ task "node_drain", ["install_litmus"] do |t, args|
   deployment_label_value="#{spec_labels.as_h.first_value}"
   app_nodeName_cmd = "kubectl get pods -l #{deployment_label}=#{deployment_label_value} -n #{resource["namespace"]} -o=jsonpath='{.items[0].spec.nodeName}'"
   Log.for("node_drain").info { "Getting the app node name #{app_nodeName_cmd}" } if check_verbose(args)
-  status_code = Process.run("#{app_nodeName_cmd}", shell: true, output: appNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_status
+  status_code = Process.run("#{app_nodeName_cmd}", shell: true, output: appNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_code
   Log.for("node_drain").info { "status_code: #{status_code}" } if check_verbose(args)
   app_nodeName = appNodeName_response.to_s

   litmus_nodeName_cmd = "kubectl get pods -n litmus -l app.kubernetes.io/name=litmus -o=jsonpath='{.items[0].spec.nodeName}'"
   Log.for("node_drain").info { "Getting the app node name #{litmus_nodeName_cmd}" } if check_verbose(args)
-  status_code = Process.run("#{litmus_nodeName_cmd}", shell: true, output: litmusNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_status
+  status_code = Process.run("#{litmus_nodeName_cmd}", shell: true, output: litmusNodeName_response = IO::Memory.new, error: stderr = IO::Memory.new).exit_code
   Log.for("node_drain").info { "status_code: #{status_code}" } if check_verbose(args)
   litmus_nodeName = litmusNodeName_response.to_s
   Log.info { "Workload Node Name: #{app_nodeName}" }
