|
12 | 12 | # See the License for the specific language governing permissions and
|
13 | 13 | # limitations under the License.
|
14 | 14 |
|
15 |
| -from __future__ import (absolute_import, division, print_function) |
| 15 | +from __future__ import absolute_import, division, print_function |
| 16 | + |
16 | 17 | __metaclass__ = type
|
17 | 18 |
|
18 | 19 | DOCUMENTATION = r'''
|
|
36 | 37 | - List of names or globs of UNIX System Services (USS) files,
|
37 | 38 | PS (sequential data sets), PDS, PDSE to compress or archive.
|
38 | 39 | - USS file paths should be absolute paths.
|
| 40 | + - GDS relative notation is supported. |
39 | 41 | - "MVS data sets supported types are: C(SEQ), C(PDS), C(PDSE)."
|
40 | 42 | - VSAMs are not supported.
|
41 | 43 | type: list
|
|
123 | 125 | required: true
|
124 | 126 | exclude:
|
125 | 127 | description:
|
126 |
| - - Remote absolute path, glob, or list of paths, globs or data set name |
127 |
| - patterns for the file, files or data sets to exclude from src list |
128 |
| - and glob expansion. |
| 128 | + - Remote absolute path, glob, or list of paths, globs, data set name |
| 129 | + patterns or generation data sets (GDSs) in relative notation for the file, |
| 130 | + files or data sets to exclude from src list and glob expansion. |
129 | 131 | - "Patterns (wildcards) can contain one of the following, `?`, `*`."
|
130 | 132 | - "* matches everything."
|
131 | 133 | - "? matches any single character."
|
|
331 | 333 | name: tar
|
332 | 334 |
|
333 | 335 | # Archive multiple files
|
334 |
| -- name: Compress list of files into a zip |
| 336 | +- name: Archive list of files into a zip |
335 | 337 | zos_archive:
|
336 | 338 | src:
|
337 | 339 | - /tmp/archive/foo.txt
|
|
341 | 343 | name: zip
|
342 | 344 |
|
343 | 345 | # Archive one data set into terse
|
344 |
| -- name: Compress data set into a terse |
| 346 | +- name: Archive data set into a terse |
345 | 347 | zos_archive:
|
346 | 348 | src: "USER.ARCHIVE.TEST"
|
347 | 349 | dest: "USER.ARCHIVE.RESULT.TRS"
|
348 | 350 | format:
|
349 | 351 | name: terse
|
350 | 352 |
|
351 | 353 | # Use terse with different options
|
352 |
| -- name: Compress data set into a terse, specify pack algorithm and use adrdssu |
| 354 | +- name: Archive data set into a terse, specify pack algorithm and use adrdssu |
353 | 355 | zos_archive:
|
354 | 356 | src: "USER.ARCHIVE.TEST"
|
355 | 357 | dest: "USER.ARCHIVE.RESULT.TRS"
|
|
360 | 362 | use_adrdssu: true
|
361 | 363 |
|
362 | 364 | # Use a pattern to store
|
363 |
| -- name: Compress data set pattern using xmit |
| 365 | +- name: Archive data set pattern using xmit |
364 | 366 | zos_archive:
|
365 | 367 | src: "USER.ARCHIVE.*"
|
366 | 368 | exclude_sources: "USER.ARCHIVE.EXCLUDE.*"
|
367 | 369 | dest: "USER.ARCHIVE.RESULT.XMIT"
|
368 | 370 | format:
|
369 | 371 | name: xmit
|
| 372 | +
|
| 373 | +- name: Archive multiple GDSs into a terse |
| 374 | + zos_archive: |
| 375 | + src: |
| 376 | + - "USER.GDG(0)" |
| 377 | + - "USER.GDG(-1)" |
| 378 | + - "USER.GDG(-2)" |
| 379 | + dest: "USER.ARCHIVE.RESULT.TRS" |
| 380 | + format: |
| 381 | + name: terse |
| 382 | + format_options: |
| 383 | + use_adrdssu: true |
| 384 | +
|
| 385 | +- name: Archive multiple data sets into a new GDS |
| 386 | + zos_archive: |
| 387 | + src: "USER.ARCHIVE.*" |
| 388 | + dest: "USER.GDG(+1)" |
| 389 | + format: |
| 390 | + name: terse |
| 391 | + format_options: |
| 392 | + use_adrdssu: true |
370 | 393 | '''
|
371 | 394 |
|
372 | 395 | RETURN = r'''
|
|
415 | 438 | returned: always
|
416 | 439 | '''
|
417 | 440 |
|
418 |
| -from ansible.module_utils.basic import AnsibleModule |
419 |
| -from ansible.module_utils._text import to_bytes |
420 |
| -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( |
421 |
| - better_arg_parser, |
422 |
| - data_set, |
423 |
| - validation, |
424 |
| - mvs_cmd, |
425 |
| -) |
426 |
| -from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import ( |
427 |
| - ZOAUImportError, |
428 |
| -) |
429 |
| -import os |
430 |
| -import tarfile |
431 |
| -import zipfile |
432 | 441 | import abc
|
433 | 442 | import glob
|
434 |
| -import re |
435 | 443 | import math
|
| 444 | +import os |
| 445 | +import re |
| 446 | +import tarfile |
436 | 447 | import traceback
|
| 448 | +import zipfile |
437 | 449 | from hashlib import sha256
|
438 | 450 |
|
| 451 | +from ansible.module_utils._text import to_bytes |
| 452 | +from ansible.module_utils.basic import AnsibleModule |
| 453 | +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils import ( |
| 454 | + better_arg_parser, data_set, mvs_cmd, validation) |
| 455 | +from ansible_collections.ibm.ibm_zos_core.plugins.module_utils.import_handler import \ |
| 456 | + ZOAUImportError |
439 | 457 |
|
440 | 458 | try:
|
441 | 459 | from zoautil_py import datasets
|
@@ -1278,11 +1296,17 @@ def expand_mvs_paths(self, paths):
|
1278 | 1296 | """
|
1279 | 1297 | expanded_path = []
|
1280 | 1298 | for path in paths:
|
| 1299 | + e_path = [] |
1281 | 1300 | if '*' in path:
|
1282 | 1301 | # list_dataset_names returns a list of data set names or empty.
|
1283 | 1302 | e_paths = datasets.list_dataset_names(path)
|
1284 | 1303 | else:
|
1285 | 1304 | e_paths = [path]
|
| 1305 | + |
| 1306 | + # resolve GDS relative names |
| 1307 | + for index, e_path in enumerate(e_paths): |
| 1308 | + if data_set.DataSet.is_gds_relative_name(e_path): |
| 1309 | + e_paths[index] = data_set.DataSet.resolve_gds_absolute_name(e_path) |
1286 | 1310 | expanded_path.extend(e_paths)
|
1287 | 1311 | return expanded_path
|
1288 | 1312 |
|
@@ -1415,17 +1439,18 @@ def archive_targets(self):
|
1415 | 1439 | self.module.fail_json(
|
1416 | 1440 | msg="To archive multiple source data sets, you must use option 'use_adrdssu=True'.")
|
1417 | 1441 | source = self.targets[0]
|
1418 |
| - # dest = self.create_dest_ds(self.dest) |
1419 |
| - dest, changed = self._create_dest_data_set( |
| 1442 | + dataset = data_set.MVSDataSet( |
1420 | 1443 | name=self.dest,
|
1421 |
| - replace=True, |
1422 |
| - type='seq', |
| 1444 | + data_set_type='seq', |
1423 | 1445 | record_format='fb',
|
1424 | 1446 | record_length=AMATERSE_RECORD_LENGTH,
|
1425 | 1447 | space_primary=self.dest_data_set.get("space_primary"),
|
1426 |
| - space_type=self.dest_data_set.get("space_type")) |
| 1448 | + space_type=self.dest_data_set.get("space_type") |
| 1449 | + ) |
| 1450 | + changed = dataset.create(replace=True) |
1427 | 1451 | self.changed = self.changed or changed
|
1428 |
| - self.add(source, dest) |
| 1452 | + self.dest = dataset.name |
| 1453 | + self.add(source, self.dest) |
1429 | 1454 | self.clean_environment(data_sets=self.tmp_data_sets)
|
1430 | 1455 |
|
1431 | 1456 |
|
@@ -1509,16 +1534,19 @@ def archive_targets(self):
|
1509 | 1534 | msg="To archive multiple source data sets, you must use option 'use_adrdssu=True'.")
|
1510 | 1535 | source = self.sources[0]
|
1511 | 1536 | # dest = self.create_dest_ds(self.dest)
|
1512 |
| - dest, changed = self._create_dest_data_set( |
| 1537 | + dataset = data_set.MVSDataSet( |
1513 | 1538 | name=self.dest,
|
1514 |
| - replace=True, |
1515 |
| - type='seq', |
| 1539 | + data_set_type='seq', |
1516 | 1540 | record_format='fb',
|
1517 | 1541 | record_length=XMIT_RECORD_LENGTH,
|
1518 | 1542 | space_primary=self.dest_data_set.get("space_primary"),
|
1519 |
| - space_type=self.dest_data_set.get("space_type")) |
| 1543 | + space_type=self.dest_data_set.get("space_type") |
| 1544 | + ) |
| 1545 | + changed = dataset.create(replace=True) |
| 1546 | + # dataset.create() reports whether a new data set was allocated |
1520 | 1547 | self.changed = self.changed or changed
|
1521 |
| - self.add(source, dest) |
| 1548 | + self.dest = dataset.name |
| 1549 | + self.add(source, self.dest) |
1522 | 1550 | self.clean_environment(data_sets=self.tmp_data_sets)
|
1523 | 1551 |
|
1524 | 1552 | def get_error_hint(self, output):
|
|
0 commit comments