Skip to content
This repository was archived by the owner on Oct 10, 2025. It is now read-only.

Commit 9b44879

Browse files
authored
Merge pull request #39 from aws-solutions/feature/v1.4.3
Update to version v1.4.3
2 parents 0f1be32 + 82de37c commit 9b44879

File tree

39 files changed

+277
-162
lines changed

39 files changed

+277
-162
lines changed

CHANGELOG.md

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,14 @@ All notable changes to this project will be documented in this file.
55
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
66
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
77

8+
## [1.4.3] - 2023-10-12
9+
10+
### Changed
11+
12+
- Upgrade aws-cdk to 2.88.0
13+
- Upgrade deprecated methods in App-registry
14+
- Address or Fix all SonarQube issues
15+
816
## [1.4.2] - 2023-06-22
917

1018
### Changed

NOTICE.txt

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -37,5 +37,13 @@ requests-mock under the Apache License Version 2.0
3737
rich under the Massachusetts Institute of Technology (MIT) license
3838
tenacity under the Apache License Version 2.0
3939
quartz-scheduler under the Apache License Version 2.0
40+
parsedatetime under the Apache License Version 2.0
41+
urllib3 under the Massachusetts Institute of Technology (MIT) license
42+
setuptools under the Massachusetts Institute of Technology (MIT) license
43+
pipenv under the Massachusetts Institute of Technology (MIT) license
44+
virtualenv under the Massachusetts Institute of Technology (MIT) license
45+
tox under the Massachusetts Institute of Technology (MIT) license
46+
tox-pyenv under the Apache License Version 2.0
47+
poetry under the Massachusetts Institute of Technology (MIT) license
4048

4149
The Apache License Version 2.0 is included in LICENSE.txt.

README.md

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -609,7 +609,7 @@ The following procedures assume that all the OS-level configuration has been co
609609
- [AWS Command Line Interface](https://aws.amazon.com/cli/)
610610
- [Python](https://www.python.org/) 3.9 or newer
611611
- [Node.js](https://nodejs.org/en/) 16.x or newer
612-
- [AWS CDK](https://aws.amazon.com/cdk/) 2.75.0 or newer
612+
- [AWS CDK](https://aws.amazon.com/cdk/) 2.88.0 or newer
613613
- [Amazon Corretto OpenJDK](https://docs.aws.amazon.com/corretto/) 17.0.4.1
614614

615615
> **Please ensure you test the templates before updating any production deployments.**
@@ -707,7 +707,7 @@ After running the command, you can deploy the template:
707707
## Collection of operational metrics
708708

709709
This solution collects anonymous operational metrics to help AWS improve the quality of features of the solution.
710-
For more information, including how to disable this capability, please see the [implementation guide](https://docs.aws.amazon.com/solutions/latest/maintaining-personalized-experiences-with-ml/collection-of-operational-metrics.html).
710+
For more information, including how to disable this capability, please see the [implementation guide](https://docs.aws.amazon.com/solutions/latest/maintaining-personalized-experiences-with-ml/reference.html).
711711

712712
---
713713

source/aws_lambda/prepare_input/handler.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,7 +23,7 @@
2323
metrics = Metrics()
2424

2525

26-
def lambda_handler(event: Dict[str, Any], context: LambdaContext) -> Dict:
26+
def lambda_handler(event: Dict[str, Any], _) -> Dict:
2727
"""Add timeStarted to the workflowConfig of all items
2828
:param event: AWS Lambda Event
2929
:param context: AWS Lambda Context

source/aws_lambda/shared/personalize_service.py

Lines changed: 68 additions & 72 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,19 @@
6565
("timeStarted", Resource),
6666
("solutionVersionArn", SolutionVersion),
6767
)
68-
68+
RESOURCE_TYPES = [
69+
"datasetGroup",
70+
"datasetImport",
71+
"dataset",
72+
"eventTracker",
73+
"solution",
74+
"solutionVersion",
75+
"filter",
76+
"recommender",
77+
"campaign",
78+
"batchJob",
79+
"segmentJob"
80+
]
6981

7082
def get_duplicates(items):
7183
if isinstance(items, str):
@@ -714,71 +726,60 @@ def _validate_filters(self, path="filters[].serviceConfig"):
714726
self._fill_default_vals("filter", _filter)
715727

716728
def _validate_type(self, var, typ, err: str):
717-
validates = isinstance(var, typ)
729+
validates = isinstance(var, typ) and var is not None
730+
718731
if not validates:
719732
self._configuration_errors.append(err)
733+
720734
return validates
721735

722-
def _validate_solutions(self, path="solutions[]"):
736+
def _validate_solutions(self, path="solutions[]"):
723737
solutions = jmespath.search(path, self.config_dict) or {}
724-
for idx, _solution in enumerate(solutions):
725-
campaigns = _solution.get("campaigns", [])
726-
if self._validate_type(campaigns, list, f"solutions[{idx}].campaigns must be a list"):
727-
self._validate_campaigns(f"solutions[{idx}].campaigns", campaigns)
728-
729-
batch_inference_jobs = _solution.get("batchInferenceJobs", [])
730-
if batch_inference_jobs and self._validate_type(
731-
batch_inference_jobs,
732-
list,
733-
f"solutions[{idx}].batchInferenceJobs must be a list",
734-
):
735-
self._validate_batch_inference_jobs(
736-
path=f"solutions[{idx}].batchInferenceJobs",
737-
solution_name=_solution.get("serviceConfig", {}).get("name", ""),
738-
batch_inference_jobs=batch_inference_jobs,
739-
)
740738

741-
batch_segment_jobs = _solution.get("batchSegmentJobs", [])
742-
if batch_segment_jobs and self._validate_type(
743-
batch_segment_jobs,
744-
list,
745-
f"solutions[{idx}].batchSegmentJobs must be a list",
746-
):
747-
self._validate_batch_segment_jobs(
748-
path=f"solutions[{idx}].batchSegmentJobs",
749-
solution_name=_solution.get("serviceConfig", {}).get("name", ""),
750-
batch_segment_jobs=batch_segment_jobs,
751-
)
739+
for idx, _solution in enumerate(solutions):
740+
# Validate campaigns and batch jobs
741+
self._validate_campaigns(f"solutions[{idx}].campaigns", _solution.get("campaigns", []))
742+
self._validate_batch_inference_jobs(
743+
path=f"solutions[{idx}].batchInferenceJobs",
744+
solution_name=_solution.get("serviceConfig", {}).get("name", ""),
745+
batch_inference_jobs=_solution.get("batchInferenceJobs", []),
746+
)
747+
self._validate_batch_segment_jobs(
748+
path=f"solutions[{idx}].batchSegmentJobs",
749+
solution_name=_solution.get("serviceConfig", {}).get("name", ""),
750+
batch_segment_jobs=_solution.get("batchSegmentJobs", []),
751+
)
752752

753-
_solution = _solution.get("serviceConfig")
753+
# Validate service configuration
754+
_service_config = _solution.get("serviceConfig")
754755

755-
if not self._validate_type(_solution, dict, f"solutions[{idx}].serviceConfig must be an object"):
756+
if not self._validate_type(_service_config, dict, f"solutions[{idx}].serviceConfig must be an object"):
756757
continue
757758

758759
# `performAutoML` is currently returned from InputValidator.validate() as a valid field
759760
# Once the botocore Stubber is updated to not have this param anymore in `create_solution` call,
760761
# this check can be deleted.
761-
if "performAutoML" in _solution:
762-
del _solution["performAutoML"]
762+
if "performAutoML" in _service_config:
763+
del _service_config["performAutoML"]
763764
logger.error(
764765
"performAutoML is not a valid configuration parameter - proceeding to create the "
765766
"solution without this feature. For more details, refer to the Maintaining Personalized Experiences "
766767
"Github project's README.md file."
767768
)
768769

769-
_solution["datasetGroupArn"] = DatasetGroup().arn("validation")
770-
if "solutionVersion" in _solution:
771-
# To pass solution through InputValidator
772-
solution_version_config = _solution["solutionVersion"]
773-
del _solution["solutionVersion"]
774-
self._validate_resource(Solution(), _solution)
775-
_solution["solutionVersion"] = solution_version_config
770+
_service_config["datasetGroupArn"] = DatasetGroup().arn("validation")
776771

772+
if "solutionVersion" in _service_config:
773+
# To pass solution through InputValidator
774+
solution_version_config = _service_config["solutionVersion"]
775+
del _service_config["solutionVersion"]
776+
self._validate_resource(Solution(), _service_config)
777+
_service_config["solutionVersion"] = solution_version_config
777778
else:
778-
self._validate_resource(Solution(), _solution)
779+
self._validate_resource(Solution(), _service_config)
779780

780-
self._fill_default_vals("solution", _solution)
781-
self._validate_solution_version(_solution)
781+
self._fill_default_vals("solution", _service_config)
782+
self._validate_solution_version(_service_config)
782783

783784
def _validate_solution_version(self, solution_config):
784785
allowed_sol_version_keys = ["trainingMode", "tags"]
@@ -819,6 +820,8 @@ def _validate_solution_update(self):
819820
)
820821

821822
def _validate_campaigns(self, path, campaigns: List[Dict]):
823+
self._validate_type(campaigns, list, f"{path} must be a list")
824+
822825
for idx, campaign_config in enumerate(campaigns):
823826
current_path = f"{path}.campaigns[{idx}]"
824827

@@ -832,6 +835,12 @@ def _validate_campaigns(self, path, campaigns: List[Dict]):
832835
self._fill_default_vals("campaign", campaign)
833836

834837
def _validate_batch_inference_jobs(self, path, solution_name, batch_inference_jobs: List[Dict]):
838+
self._validate_type(
839+
batch_inference_jobs,
840+
list,
841+
f"solutions[{path} must be a list",
842+
)
843+
835844
for idx, batch_job_config in enumerate(batch_inference_jobs):
836845
current_path = f"{path}.batchInferenceJobs[{idx}]"
837846

@@ -860,6 +869,12 @@ def _validate_batch_inference_jobs(self, path, solution_name, batch_inference_jo
860869
self._fill_default_vals("batchJob", batch_job)
861870

862871
def _validate_batch_segment_jobs(self, path, solution_name, batch_segment_jobs: List[Dict]):
872+
self._validate_type(
873+
batch_segment_jobs,
874+
list,
875+
f"solutions[{path} must be a list",
876+
)
877+
863878
for idx, batch_job_config in enumerate(batch_segment_jobs):
864879
current_path = f"{path}.batchSegmentJobs[{idx}]"
865880

@@ -1108,42 +1123,23 @@ def _validate_naming(self):
11081123
self._validate_no_duplicates(name="campaign names", path="solutions[].campaigns[].serviceConfig.name")
11091124
self._validate_no_duplicates(name="solution names", path="solutions[].serviceConfig.name")
11101125

1111-
def _fill_default_vals(self, resource_type, resource_dict):
1112-
"""Insert default values for tags and other fields whenever not supplied"""
1113-
1114-
if (
1115-
resource_type
1116-
in [
1117-
"datasetGroup",
1118-
"datasetImport",
1119-
"dataset",
1120-
"eventTracker",
1121-
"solution",
1122-
"solutionVersion",
1123-
"filter",
1124-
"recommender",
1125-
"campaign",
1126-
"batchJob",
1127-
"segmentJob",
1128-
]
1129-
and "tags" not in resource_dict
1130-
):
1126+
def _fill_resource_dict_tags(self, resource_type, resource_dict):
1127+
if resource_type in RESOURCE_TYPES and "tags" not in resource_dict:
11311128
if self.pass_root_tags:
11321129
resource_dict["tags"] = self.config_dict["tags"]
11331130
else:
11341131
resource_dict["tags"] = []
11351132

1133+
def _fill_default_vals(self, resource_type, resource_dict):
1134+
"""Insert default values for tags and other fields whenever not supplied"""
1135+
self._fill_resource_dict_tags(resource_type, resource_dict)
1136+
11361137
if resource_type == "datasetImport":
11371138
if "importMode" not in resource_dict:
11381139
resource_dict["importMode"] = "FULL"
1140+
11391141
if "publishAttributionMetricsToS3" not in resource_dict:
11401142
resource_dict["publishAttributionMetricsToS3"] = False
11411143

1142-
if resource_type == "solutionVersion":
1143-
if "tags" not in resource_dict:
1144-
if self.pass_root_tags:
1145-
resource_dict["tags"] = self.config_dict["tags"]
1146-
else:
1147-
resource_dict["tags"] = []
1148-
if "trainingMode" not in resource_dict:
1149-
resource_dict["trainingMode"] = "FULL"
1144+
if resource_type == "solutionVersion" and "trainingMode" not in resource_dict:
1145+
resource_dict["trainingMode"] = "FULL"

source/cdk_solution_helper_py/helpers_cdk/aws_solutions/cdk/aws_lambda/cfn_custom_resources/resource_hash/hash.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,9 @@
2121
from aws_solutions.cdk.aws_lambda.python.function import SolutionsPythonFunction
2222
from aws_solutions.cdk.cfn_nag import add_cfn_nag_suppressions, CfnNagSuppression
2323

24+
from cdk_nag import NagSuppressions
25+
from cdk_nag import NagPackSuppression
26+
2427

2528
class ResourceHash(Construct):
2629
"""Used to create unique resource names based on the hash of the stack ID"""
@@ -56,6 +59,15 @@ def __init__(
5659
],
5760
)
5861

62+
NagSuppressions.add_resource_suppressions(self._resource_name_function.role, [
63+
NagPackSuppression(
64+
id='AwsSolutions-IAM5',
65+
reason='All IAM policies defined in this solution'
66+
'grant only least-privilege permissions. Wild '
67+
'card for resources is used only for services '
68+
'which do not have a resource arn')],
69+
apply_to_children=True)
70+
5971
properties = {
6072
"ServiceToken": self._resource_name_function.function_arn,
6173
"Purpose": purpose,

source/cdk_solution_helper_py/helpers_cdk/aws_solutions/cdk/aws_lambda/cfn_custom_resources/resource_name/name.py

Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,9 @@
2323
from aws_solutions.cdk.aws_lambda.python.function import SolutionsPythonFunction
2424
from aws_solutions.cdk.cfn_nag import add_cfn_nag_suppressions, CfnNagSuppression
2525

26+
from cdk_nag import NagSuppressions
27+
from cdk_nag import NagPackSuppression
28+
2629

2730
class ResourceName(Construct):
2831
"""Used to create unique resource names of the format {stack_name}-{purpose}-{id}"""
@@ -59,6 +62,15 @@ def __init__(
5962
],
6063
)
6164

65+
NagSuppressions.add_resource_suppressions(self._resource_name_function.role, [
66+
NagPackSuppression(
67+
id='AwsSolutions-IAM5',
68+
reason='All IAM policies defined in this solution'
69+
'grant only least-privilege permissions. Wild '
70+
'card for resources is used only for services '
71+
'which do not have a resource arn')],
72+
apply_to_children=True)
73+
6274
properties = {
6375
"ServiceToken": self._resource_name_function.function_arn,
6476
"Purpose": purpose,
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
requests==2.31.0
2-
urllib3==1.26.16
2+
urllib3==1.26.17
33
crhelper==2.0.11

source/cdk_solution_helper_py/helpers_cdk/aws_solutions/cdk/aws_lambda/java/bundling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -44,7 +44,7 @@ def __init__(
4444
self.gradle_test = gradle_test
4545
self.distribution_path = distribution_path
4646

47-
def try_bundle(self, output_dir: str, options: BundlingOptions) -> bool:
47+
def try_bundle(self, output_dir: str, options: BundlingOptions) -> bool: #NOSONAR - Options are required for method header
4848
source = Path(self.to_bundle).absolute()
4949

5050
is_gradle_build = (source / "gradlew").exists()

source/cdk_solution_helper_py/helpers_cdk/aws_solutions/cdk/aws_lambda/python/bundling.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -55,7 +55,7 @@ def platform_supports_bundling(self):
5555
logger.info("local bundling %s supported for %s" % ("is" if os_platform_can_bundle else "is not", os_platform))
5656
return os_platform_can_bundle
5757

58-
def try_bundle(self, output_dir: str, options: BundlingOptions) -> bool:
58+
def try_bundle(self, output_dir: str, options: BundlingOptions) -> bool: #NOSONAR - Options are required for method header
5959
if not self.platform_supports_bundling:
6060
raise SolutionsPythonBundlingException("this platform does not support bundling")
6161

0 commit comments

Comments
 (0)