require 'spec_helper'

| 3 | +class ConvergeFetchDnaFiles |
| 4 | + def self.share(chef_run, extra_chef_attribute_location: nil) |
| 5 | + chef_run.converge_dsl('aws-parallelcluster-platform') do |
| 6 | + fetch_dna_files 'share' do |
| 7 | + extra_chef_attribute_location extra_chef_attribute_location |
| 8 | + action :share |
| 9 | + end |
| 10 | + end |
| 11 | + end |
| 12 | + |
| 13 | + def self.cleanup(chef_run) |
| 14 | + chef_run.converge_dsl('aws-parallelcluster-platform') do |
| 15 | + fetch_dna_files 'cleanup' do |
| 16 | + action :cleanup |
| 17 | + end |
| 18 | + end |
| 19 | + end |
| 20 | +end |
| 22 | +describe 'fetch_dna_files resource' do |
| 23 | + for_all_oses do |platform, version| |
| 24 | + context "on #{platform}#{version}" do |
| 25 | + cached(:script_dir) { 'SCRIPT_DIR' } |
| 26 | + cached(:shared_dir) { 'SHARED_DIR' } |
| 27 | + cached(:region) { 'REGION' } |
| 28 | + |
| 29 | + context "when we share dna files" do |
| 30 | + cached(:chef_run) do |
| 31 | + runner = runner(platform: platform, version: version, step_into: ['fetch_dna_files']) do |node| |
| 32 | + node.override['cluster']['scripts_dir'] = script_dir |
| 33 | + node.override['cluster']['shared_dir'] = shared_dir |
| 34 | + node.override['cluster']['node_type'] = 'HeadNode' |
| 35 | + node.override['cluster']['region'] = region |
| 36 | + node.override['kitchen'] = true |
| 37 | + end |
| 38 | + ConvergeFetchDnaFiles.share(runner, extra_chef_attribute_location: "#{kitchen_instance_types_data_path}") |
| 39 | + end |
| 40 | + cached(:node) { chef_run.node } |
| 41 | + |
| 42 | + # it "it copies data from /tmp/extra.json" do |
| 43 | + # is_expected.to create_remote_file("copy extra.json") |
| 44 | + # .with(path: "#{shared_dir}/dna/extra.json") |
| 45 | + # .with(source: "file://#{kitchen_instance_types_data_path}") |
| 46 | + # end |
| 47 | + |
| 48 | + it 'runs get_compute_user_data.py to get dna files' do |
| 49 | + is_expected.to run_execute('Share dna.json with ComputeFleet').with( |
| 50 | + command: "#{cookbook_virtualenv_path}/bin/python #{node['cluster']['scripts_dir']}/get_compute_user_data.py" \ |
| 51 | + " --region #{node['cluster']['region']}" |
| 52 | + ) |
| 53 | + end |
| 54 | + end |
| 55 | + |
| 56 | + context "when we cleanup dna files" do |
| 57 | + cached(:chef_run) do |
| 58 | + runner = runner(platform: platform, version: version, step_into: ['fetch_dna_files']) do |node| |
| 59 | + node.override['cluster']['scripts_dir'] = script_dir |
| 60 | + node.override['cluster']['shared_dir'] = shared_dir |
| 61 | + node.override['cluster']['node_type'] = 'HeadNode' |
| 62 | + node.override['cluster']['region'] = region |
| 63 | + end |
| 64 | + allow_any_instance_of(Object).to receive(:aws_domain).and_return(aws_domain) |
| 65 | + ConvergeFetchDnaFiles.cleanup(runner) |
| 66 | + end |
| 67 | + cached(:node) { chef_run.node } |
| 68 | + |
| 69 | + it 'cleanups dna files' do |
| 70 | + is_expected.to run_execute("Cleanup dna.json and extra.json from #{node['cluster']['shared_dir']}/dna").with( |
| 71 | + command: "#{cookbook_virtualenv_path}/bin/python #{node['cluster']['scripts_dir']}/get_compute_user_data.py" \ |
| 72 | + " --region #{node['cluster']['region']} --cleanup" |
| 73 | + ) |
| 74 | + end |
| 75 | + end |
| 76 | + end |
| 77 | + end |
| 78 | +end |