Commit f1922e2

Alseny Diallo committed:
feat: upgrade to Python 3.12 and fix Docker build issues
- Update Dockerfile to use Python 3.12 Lambda base image
- Convert broken multi-stage build to single-stage build
- Update dependency versions: Hadoop 3.3.6, AWS SDK 1.12.261, PySpark 3.5.0
- Switch from yum to dnf package manager
- Add support for requirements.txt if present
- Update spark-class script to prioritize Java 11 Amazon Corretto paths
- Fix aws-ecr-repository-push.sh script argument validation and ECR login
- Add proper error handling and usage instructions
- Update all Python paths from 3.10 to 3.12 throughout
- Add diagnostics for spark-class installation and verification
1 parent abfcb58 commit f1922e2

File tree

6 files changed (+259, -52 lines)

Dockerfile

Lines changed: 42 additions & 37 deletions
@@ -1,70 +1,75 @@
-# Multi-stage build for optimal size
-FROM public.ecr.aws/lambda/python:3.10 as builder
+FROM public.ecr.aws/lambda/python:3.12

 # Build arguments - consolidated at top
-ARG HADOOP_VERSION=3.2.4
-ARG AWS_SDK_VERSION=1.11.901
-ARG PYSPARK_VERSION=3.3.0
+ARG HADOOP_VERSION=3.3.6
+ARG AWS_SDK_VERSION=1.12.261
+ARG PYSPARK_VERSION=3.5.0
 ARG FRAMEWORK
 ARG DELTA_FRAMEWORK_VERSION=2.2.0
 ARG HUDI_FRAMEWORK_VERSION=0.12.2
 ARG ICEBERG_FRAMEWORK_VERSION=3.3_2.12
 ARG ICEBERG_FRAMEWORK_SUB_VERSION=1.0.0
 ARG DEEQU_FRAMEWORK_VERSION=2.0.3-spark-3.3
+ARG AWS_REGION

-# Single consolidated RUN layer for all build operations
+ENV AWS_REGION=${AWS_REGION}
+
+# System updates and package installation
 COPY download_jars.sh /tmp/
 RUN set -ex && \
-    # System updates and package installation
-    yum update -y && \
-    yum install -y java-11-amazon-corretto-headless wget unzip && \
-    yum clean all && \
-    rm -rf /var/cache/yum && \
-    # Python package installation
+    dnf update -y && \
+    dnf install -y wget unzip java-11-amazon-corretto-headless python3-setuptools && \
+    dnf clean all && \
+    rm -rf /var/cache/dnf && \
     pip install --no-cache-dir --upgrade pip && \
+    pip install --no-cache-dir setuptools wheel && \
     pip install --no-cache-dir pyspark==$PYSPARK_VERSION boto3 && \
     # Conditional DEEQU installation
     (echo "$FRAMEWORK" | grep -q "DEEQU" && \
     pip install --no-cache-dir --no-deps pydeequ && \
-    pip install --no-cache-dir pandas || \
+    pip install --no-cache-dir pandas && \
+    echo "DEEQU found in FRAMEWORK" || \
     echo "DEEQU not found in FRAMEWORK") && \
     # JAR download and cleanup
     chmod +x /tmp/download_jars.sh && \
-    SPARK_HOME="/var/lang/lib/python3.10/site-packages/pyspark" && \
+    SPARK_HOME="/var/lang/lib/python3.12/site-packages/pyspark" && \
     /tmp/download_jars.sh $FRAMEWORK $SPARK_HOME $HADOOP_VERSION $AWS_SDK_VERSION $DELTA_FRAMEWORK_VERSION $HUDI_FRAMEWORK_VERSION $ICEBERG_FRAMEWORK_VERSION $ICEBERG_FRAMEWORK_SUB_VERSION $DEEQU_FRAMEWORK_VERSION && \
     rm -rf /tmp/* /var/tmp/*

-# Final optimized stage
-FROM public.ecr.aws/lambda/python:3.10
+# Copy requirements.txt if present and install
+COPY requirements.txt ${LAMBDA_TASK_ROOT}/
+RUN if [ -f "${LAMBDA_TASK_ROOT}/requirements.txt" ]; then pip install --no-cache-dir -r ${LAMBDA_TASK_ROOT}/requirements.txt; fi

-# Single consolidated RUN layer for runtime setup
-COPY --from=builder /var/lang/lib/python3.10/site-packages/ /var/lang/lib/python3.10/site-packages/
-COPY --from=builder /var/runtime/ /var/runtime/
+# Copy application files
 COPY libs/glue_functions /home/glue_functions
-COPY spark-class /var/lang/lib/python3.10/site-packages/pyspark/bin/
+COPY spark-class /var/lang/lib/python3.12/site-packages/pyspark/bin/
 COPY sparkLambdaHandler.py ${LAMBDA_TASK_ROOT}
+# Optionally copy log4j.properties if present
+RUN if [ -f log4j.properties ]; then cp log4j.properties /var/lang/lib/python3.12/site-packages/pyspark/conf/; fi

 RUN set -ex && \
-    # Install runtime Java and cleanup
-    yum update -y && \
-    yum install -y java-11-amazon-corretto-headless && \
-    yum clean all && \
-    rm -rf /var/cache/yum /tmp/* /var/tmp/* && \
-    # Set permissions in single operation
-    chmod -R 755 /home/glue_functions /var/lang/lib/python3.10/site-packages/pyspark
+    dnf update -y && \
+    dnf install -y java-11-amazon-corretto-headless && \
+    dnf clean all && \
+    rm -rf /var/cache/dnf /tmp/* /var/tmp/* && \
+    chmod -R 755 /home/glue_functions /var/lang/lib/python3.12/site-packages/pyspark && \
+    # Diagnostics for spark-class
+    ls -la /var/lang/lib/python3.12/site-packages/pyspark/bin/ || echo "Spark bin directory not found" && \
+    if [ -f "/var/lang/lib/python3.12/site-packages/pyspark/bin/spark-class" ]; then echo "Custom spark-class after copying:"; cat /var/lang/lib/python3.12/site-packages/pyspark/bin/spark-class; else echo "Custom spark-class not found"; fi && \
+    ln -sf /var/lang/lib/python3.12/site-packages/pyspark/bin/spark-class /usr/local/bin/spark-class && \
+    ls -la /usr/local/bin/spark-class

-# Consolidated environment variables
-ENV SPARK_HOME="/var/lang/lib/python3.10/site-packages/pyspark" \
-    SPARK_VERSION=3.3.0 \
+ENV SPARK_HOME="/var/lang/lib/python3.12/site-packages/pyspark" \
+    SPARK_VERSION=3.5.0 \
     JAVA_HOME="/usr/lib/jvm/java-11-amazon-corretto" \
-    PATH="$PATH:/var/lang/lib/python3.10/site-packages/pyspark/bin:/var/lang/lib/python3.10/site-packages/pyspark/sbin:/usr/lib/jvm/java-11-amazon-corretto/bin" \
-    PYTHONPATH="/var/lang/lib/python3.10/site-packages/pyspark/python:/var/lang/lib/python3.10/site-packages/pyspark/python/lib/py4j-0.10.9-src.zip:/home/glue_functions" \
+    PATH="$PATH:/var/lang/lib/python3.12/site-packages/pyspark/bin:/var/lang/lib/python3.12/site-packages/pyspark/sbin:/usr/lib/jvm/java-11-amazon-corretto/bin" \
+    PYTHONPATH="/var/lang/lib/python3.12/site-packages/pyspark/python:/var/lang/lib/python3.12/site-packages/pyspark/python/lib/py4j-0.10.9.7-src.zip:/home/glue_functions" \
     INPUT_PATH="" \
     OUTPUT_PATH="" \
-    AWS_ACCESS_KEY_ID="" \
-    AWS_SECRET_ACCESS_KEY="" \
-    AWS_REGION="" \
-    AWS_SESSION_TOKEN="" \
     CUSTOM_SQL=""

-CMD [ "/var/task/sparkLambdaHandler.lambda_handler" ]
+RUN java -version
+
+RUN chmod 755 ${LAMBDA_TASK_ROOT}/sparkLambdaHandler.py
+
+CMD [ "sparkLambdaHandler.lambda_handler" ]

aws-ecr-repository-push.sh

File mode changed: 100644 → 100755
Lines changed: 11 additions & 10 deletions
@@ -4,11 +4,11 @@

 echo "Starting the PUSH to AWS ECR...."

-
-
 if [ $# -eq 0 ]
 then
   echo "Please provide the image name"
+  echo "Usage: $0 <image-name>"
+  exit 1
 fi

 Dockerimage=$1
@@ -18,13 +18,13 @@ aws_account=$(aws sts get-caller-identity --query Account --output text)

 if [ $? -ne 0 ]
 then
+  echo "Failed to get AWS account number. Please check your AWS credentials."
   exit 255
 fi

-
-# Get the region defined in the current configuration (default to us-west-2 if none defined)
+# Get the region defined in the current configuration (default to us-east-1 if none defined)
 aws_region=$(aws configure get region)
-aws_region=${region:-us-east-1}
+aws_region=${aws_region:-us-east-1}
 reponame="${aws_account}.dkr.ecr.${aws_region}.amazonaws.com/${Dockerimage}:latest"

 # Creates a repo if it does not exist
@@ -36,17 +36,18 @@ then
   aws ecr create-repository --repository-name "${Dockerimage}" > /dev/null
 fi

-# Get the AWS ECr login
-aws ecr get-login-password --region "${aws_region}" | docker login --username AWS --password-stdin "${aws_account}".dkr.ecr."${aws_region}".amazonaws.com
+# Get the AWS ECR login to pull base image from public ECR
+aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin public.ecr.aws

 # Build the docker image and push to ECR
 echo "Building the docker image"
-docker build -t ${Dockerimage} .
-
+docker build -t ${Dockerimage} .

 echo "Tagging the Docker image"
 docker tag ${Dockerimage} ${reponame}

+# Get the AWS ECR login to push the image to private ECR
+aws ecr get-login-password --region "${aws_region}" | docker login --username AWS --password-stdin "${aws_account}".dkr.ecr."${aws_region}".amazonaws.com

-echo "Pushing the Docket image to AWS ECR"
+echo "Pushing the Docker image to AWS ECR"
 docker push ${reponame}
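A minimal usage sketch of the updated script, assuming AWS credentials and a default region are already configured (the image name is illustrative):

    ./aws-ecr-repository-push.sh sparkonlambda

The script now exits early with a usage message when the image name is omitted, logs in to public.ecr.aws before the build so the Lambda base image can be pulled, and logs in to the account's private registry only right before the push.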

download_jars.sh

Lines changed: 104 additions & 3 deletions
@@ -13,11 +13,112 @@ mkdir $SPARK_HOME/conf
 echo "SPARK_LOCAL_IP=127.0.0.1" > $SPARK_HOME/conf/spark-env.sh
 echo "JAVA_HOME=/usr/lib/jvm/$(ls /usr/lib/jvm |grep java)/jre" >> $SPARK_HOME/conf/spark-env.sh

+# Download core S3 filesystem JARs with updated versions
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/${HADOOP_VERSION}/hadoop-aws-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-bundle/${AWS_SDK_VERSION}/aws-java-sdk-bundle-${AWS_SDK_VERSION}.jar -P ${SPARK_HOME}/jars/

+# Additional JARs for better S3 compatibility
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-common/${HADOOP_VERSION}/hadoop-common-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-client/${HADOOP_VERSION}/hadoop-client-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-client-api/${HADOOP_VERSION}/hadoop-client-api-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-client-runtime/${HADOOP_VERSION}/hadoop-client-runtime-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/

+# Add Hadoop statistics and fs libraries to fix NoSuchMethodError
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-annotations/${HADOOP_VERSION}/hadoop-annotations-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-auth/${HADOOP_VERSION}/hadoop-auth-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/thirdparty/hadoop-shaded-guava/${HADOOP_VERSION}/hadoop-shaded-guava-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/thirdparty/hadoop-shaded-protobuf_3_7/${HADOOP_VERSION}/hadoop-shaded-protobuf_3_7-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-client-core/${HADOOP_VERSION}/hadoop-mapreduce-client-core-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-mapreduce-client-common/${HADOOP_VERSION}/hadoop-mapreduce-client-common-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-hdfs-client/${HADOOP_VERSION}/hadoop-hdfs-client-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+
+# Add additional Hadoop libraries to fix S3A filesystem issues
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-client-api/${HADOOP_VERSION}/hadoop-client-api-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-client-runtime/${HADOOP_VERSION}/hadoop-client-runtime-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
+
+# Fix for IOStatisticsBinding NoSuchMethodError
+# Download specific version that contains the required IOStatisticsBinding class
+FIXED_VERSION="3.3.4"
+echo "Downloading fixed Hadoop libraries version $FIXED_VERSION"
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-common/$FIXED_VERSION/hadoop-common-$FIXED_VERSION.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/$FIXED_VERSION/hadoop-aws-$FIXED_VERSION.jar -P ${SPARK_HOME}/jars/
+
+# Download specific statistics implementation jars
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/thirdparty/hadoop-shaded-guava/1.1.1/hadoop-shaded-guava-1.1.1.jar -P ${SPARK_HOME}/jars/ || echo "hadoop-shaded-guava not found"
+
+# Download specific fs-statistics JAR that contains IOStatisticsBinding
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-common/$FIXED_VERSION/hadoop-common-$FIXED_VERSION-tests.jar -P ${SPARK_HOME}/jars/ || echo "hadoop-common-tests not found"
+
+# Download additional S3A implementation classes
+wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/$FIXED_VERSION/hadoop-aws-$FIXED_VERSION-tests.jar -P ${SPARK_HOME}/jars/ || echo "hadoop-aws-tests not found"
+
+# Copy the existing log4j.properties file to the Spark conf directory
+echo "Copying existing log4j.properties file to Spark conf directory"
+cp /opt/spark-on-lambda-handler/log4j.properties ${SPARK_HOME}/conf/
+
+# Create a core-site.xml file with S3A configurations
+echo "Creating core-site.xml file"
+cat > ${SPARK_HOME}/conf/core-site.xml << EOL
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<configuration>
+  <property>
+    <name>fs.s3a.impl</name>
+    <value>org.apache.hadoop.fs.s3a.S3AFileSystem</value>
+  </property>
+  <property>
+    <name>fs.s3a.aws.credentials.provider</name>
+    <value>com.amazonaws.auth.DefaultAWSCredentialsProviderChain</value>
+  </property>
+  <property>
+    <name>fs.s3a.connection.maximum</name>
+    <value>100</value>
+  </property>
+  <property>
+    <name>fs.s3a.experimental.input.fadvise</name>
+    <value>sequential</value>
+  </property>
+  <property>
+    <name>fs.s3a.impl.disable.cache</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>fs.s3a.path.style.access</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>fs.s3a.committer.name</name>
+    <value>directory</value>
+  </property>
+  <property>
+    <name>fs.s3a.committer.staging.conflict-mode</name>
+    <value>append</value>
+  </property>
+  <property>
+    <name>fs.s3a.committer.staging.unique-filenames</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>fs.s3a.fast.upload</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>mapreduce.fileoutputcommitter.algorithm.version</name>
+    <value>2</value>
+  </property>
+</configuration>
+EOL
+
+# Add AWS SDK v2 components for better S3 compatibility
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/s3/2.20.56/s3-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/utils/2.20.56/utils-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/auth/2.20.56/auth-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/http-client-spi/2.20.56/http-client-spi-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/regions/2.20.56/regions-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/sdk-core/2.20.56/sdk-core-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/apache-client/2.20.56/apache-client-2.20.56.jar -P ${SPARK_HOME}/jars/
+wget -q https://repo1.maven.org/maven2/software/amazon/awssdk/aws-core/2.20.56/aws-core-2.20.56.jar -P ${SPARK_HOME}/jars/

-wget -q https://repo1.maven.org/maven2/org/apache/hadoop/hadoop-aws/${HADOOP_VERSION}/hadoop-aws-${HADOOP_VERSION}.jar -P ${SPARK_HOME}/jars/
-wget -q https://repo1.maven.org/maven2/com/amazonaws/aws-java-sdk-bundle/${AWS_SDK_VERSION}/aws-java-sdk-bundle-${AWS_SDK_VERSION}.jar -P ${SPARK_HOME}/jars/
 # jar files needed to conncet to Snowflake
 #wget -q https://repo1.maven.org/maven2/net/snowflake/spark-snowflake_2.12/2.12.0-spark_3.3/spark-snowflake_2.12-2.12.0-spark_3.3.jar -P ${SPARK_HOME}/jars/
 #wget -q https://repo1.maven.org/maven2/net/snowflake/snowflake-jdbc/3.13.33/snowflake-jdbc-3.13.33.jar -P ${SPARK_HOME}/jars/
@@ -61,4 +162,4 @@ echo $fw
 echo "Unknown framework: $fw"
 ;;
 esac
-done
+done
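Because the script now downloads both the ${HADOOP_VERSION} (3.3.6) jars and the pinned 3.3.4 hadoop-common/hadoop-aws jars (which its comments describe as carrying the IOStatisticsBinding class) into the same directory, it can be worth confirming which Hadoop jars actually landed in the built image. A hedged diagnostic sketch, not part of this commit; the image tag is illustrative:

    docker run --rm --entrypoint /bin/sh sparkonlambda \
      -c 'ls /var/lang/lib/python3.12/site-packages/pyspark/jars | grep -E "hadoop-(aws|common)"'

Mixed Hadoop versions on one classpath are a common source of NoSuchMethodError, so the listing helps verify what Spark will actually load.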

log4j.properties

Lines changed: 47 additions & 0 deletions
@@ -0,0 +1,47 @@
+# Set root logger level to DEBUG and its only appender to console
+log4j.rootLogger=WARN, console
+
+# Console appender configuration
+log4j.appender.console=org.apache.log4j.ConsoleAppender
+log4j.appender.console.layout=org.apache.log4j.PatternLayout
+log4j.appender.console.layout.ConversionPattern=%d{yy/MM/dd HH:mm:ss} %p %c{1}: %m%n
+
+# Set the default spark-shell log level to WARN. When running the spark-shell, the
+# log level for this class is used to overwrite the root logger's log level, so that
+# the user can have different defaults for the shell and regular Spark apps.
+log4j.logger.org.apache.spark.repl.Main=WARN
+
+# Settings to quiet third party logs that are too verbose
+log4j.logger.org.spark_project.jetty=WARN
+log4j.logger.org.spark_project.jetty.util.component.AbstractLifeCycle=ERROR
+log4j.logger.org.apache.spark.repl.SparkIMain$exprTyper=WARN
+log4j.logger.org.apache.spark.repl.SparkILoop$SparkILoopInterpreter=WARN
+log4j.logger.org.apache.parquet=ERROR
+log4j.logger.parquet=ERROR
+
+# SPARK-9183: Settings to avoid annoying messages when looking up nonexistent UDFs in SparkSQL with Hive support
+log4j.logger.org.apache.hadoop.hive.metastore.RetryingHMSHandler=FATAL
+log4j.logger.org.apache.hadoop.hive.ql.exec.FunctionRegistry=ERROR
+
+# Set the log level for your application's package
+log4j.logger.your.application.package=DEBUG
+
+# Additional settings to reduce logging
+log4j.logger.org.apache.spark=WARN
+log4j.logger.org.apache.hadoop=WARN
+log4j.logger.org.apache.kafka=WARN
+log4j.logger.org.apache.zookeeper=WARN
+log4j.logger.org.apache.hive=WARN
+
+# Hadoop metrics configuration to reduce warnings
+log4j.logger.org.apache.hadoop.metrics2=ERROR
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsConfig=ERROR
+log4j.logger.org.apache.hadoop.metrics2.impl.MetricsSystemImpl=ERROR
+
+# S3 filesystem specific logging
+log4j.logger.org.apache.hadoop.fs.s3a=WARN
+log4j.logger.org.apache.hadoop.fs.s3a.impl=WARN
+
+# AWS SDK logging
+log4j.logger.com.amazonaws=WARN
+log4j.logger.com.amazonaws.services.s3=WARN
requirements.txt

Lines changed: 8 additions & 0 deletions
@@ -0,0 +1,8 @@
+boto3
+scikit-learn
+pandas
+numpy
+seaborn
+matplotlib
+s3fs
+openpyxl
