39 changes: 0 additions & 39 deletions .github/workflows/python-app.yml

This file was deleted.

34 changes: 34 additions & 0 deletions .github/workflows/python-tests.yml
@@ -0,0 +1,34 @@
name: Run Pytest

on:
  push:
    branches: [ main, master ]
  pull_request:
    branches: [ main, master ]

jobs:
  test:
    runs-on: ubuntu-latest

    strategy:
      matrix:
        python-version: ["3.11"]  # adjust to the versions you want to support

    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          python setup.py install
          pip install pytest

      - name: Run tests with pytest
        run: |
          pytest --maxfail=1 --disable-warnings -v
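
For local debugging, the same sequence can be mirrored outside Actions. A minimal sketch, assuming it runs from the repository root; the commands simply restate the workflow steps above:

# Local mirror of the CI steps above (assumption: run from the repository root).
import subprocess
import sys

for cmd in (
    [sys.executable, "-m", "pip", "install", "--upgrade", "pip"],
    [sys.executable, "setup.py", "install"],
    [sys.executable, "-m", "pip", "install", "pytest"],
    [sys.executable, "-m", "pytest", "--maxfail=1", "--disable-warnings", "-v"],
):
    subprocess.run(cmd, check=True)  # check=True aborts on the first failing step, like CI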
57 changes: 23 additions & 34 deletions gen_requirements.py
@@ -59,19 +59,22 @@
# Maps named DATAGEN piece (see description above) to a list of names of Python packages. Please use
# alphabetical order for each package list, and do not add version constraints here!
REQUIREMENTS_BY_PIECE: RequirementsByPieceType = [
    # Base requirements needed to install tvm.
    # Base requirements needed to install gsm-data-generator.
    (
        "core",
        (
            "Base requirements needed to install tvm",
            "Base requirements needed to install gsm-data-generator",
            [
                "cloudpickle",
                # "ml_dtypes",
                "docutils",
                "numpy",
                "packaging",
                "pandas",
                "psutil",
                "scipy",
                "tornado",
                "pycryptodome",
                "pydantic",
                "python-dateutil",
                "pytz",
                "typing_extensions",
            ],
        ),
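
As the type alias RequirementsByPieceType suggests, each entry above is a (piece, (description, [packages])) tuple. A minimal sketch of how one entry could be rendered into a requirements file; emit_piece and its output layout are illustrative assumptions, not the script's actual emitter:

# Illustrative only -- not gen_requirements.py's real output logic.
def emit_piece(piece: str, description: str, packages: list) -> str:
    lines = [f"# {description}"]
    lines += sorted(packages)  # package lists stay alphabetical, per the comment above
    return "\n".join(lines) + "\n"

# emit_piece("core", "Base requirements needed to install gsm-data-generator",
#            ["numpy", "pandas", "pydantic"])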
@@ -107,18 +110,11 @@
    #         ],
    #     ),
    # ),
    #
    # (
    #     "importer-tensorflow",
    #     ("Requirements for the TensorFlow importer", ["tensorflow", "tensorflow-estimator"]),
    # ),
    # (
    #     "importer-tflite",
    #     ("Requirements for the TFLite importer", ["tensorflow", "tensorflow-estimator", "tflite"]),
    # ),
    # (
    #     "tvmc",
    #     "gsm-data-generator-c",
    #     (
    #         "Requirements for the tvmc command-line tool",
    #         "Requirements for the gsm-data-generator-c command-line tool",
    #         [
    #             "ethos-u-vela",
    #             "future",  # Hidden dependency of torch.
@@ -133,18 +129,6 @@
    #         ],
    #     ),
    # ),
    # # XGBoost, useful for autotuning on some targets.
    # (
    #     "xgboost",
    #     (
    #         "Requirements for XGBoost autotuning",
    #         [
    #             "future",  # Hidden dependency of torch.
    #             "torch",
    #             "xgboost",
    #         ],
    #     ),
    # ),
    # Development requirements
    (
        "dev",
@@ -155,12 +139,15 @@
"autodocsumm",
"black",
"commonmark",
# "cpplint",
"dateutil",
"docutils",
"image",
"matplotlib",
"pillow",
"pandas",
"pydantic",
# "image",
# "matplotlib",
# "pillow",
"pylint",
"pytz",
"sphinx",
"sphinx_autodoc_annotation",
"sphinx_gallery",
@@ -171,6 +158,7 @@
    ),
]


ConstraintsType = typing.List[typing.Tuple[str, typing.Union[None, str]]]

# Maps a named Python package (which should appear in REQUIREMENTS_BY_PIECE above) to a
@@ -186,13 +174,14 @@
# 2. If DATAGEN will functionally break against an old version of a dependency, specify a >= relation
# here. Include a comment linking to context or explaining why the constraint is in place.
CONSTRAINTS = [
    ("pydantic", ">=2.7"),

    # ("astroid", None),
    # ("autodocsumm", None),
    # ("black", "==20.8b1"),
    # ("cloudpickle", None),
    # ("commonmark", ">=0.7.3"),  # From PR #213.
    # ("coremltools", None),
    # ("cpplint", None),
    # # ("pydantic", ">=2.7,<3"),
    # (
    #     "docutils",
    #     "<0.17",
@@ -207,7 +196,7 @@
# ("pillow", None),
# ("psutil", None),
# ("pylint", None),
# ("scipy", None),
# # ("scipy", None),
# ("sphinx", None),
# ("sphinx_autodoc_annotation", None),
# ("sphinx_gallery", None),
12 changes: 9 additions & 3 deletions gsm_data_generator/executor/script_gpt.py
@@ -107,7 +107,9 @@ def generate_opc(self, ki: str) -> str:
    # ---------------------------
    # DATAFRAME PROCESSING
    # ---------------------------
    def apply_function(self, df: pd.DataFrame, dest: str, src: str, function) -> pd.DataFrame:
    def apply_function(
        self, df: pd.DataFrame, dest: str, src: str, function
    ) -> pd.DataFrame:
        """Apply transformation function on `src` column to produce `dest` column."""
        if dest in df.columns:
            df[dest] = df[src].apply(function)
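
Note the guard: apply_function writes dest only when that column already exists. A toy illustration of the contract (the KI value and the generate_eki call are placeholders):

# Toy illustration of apply_function's guard; the KI value is a placeholder.
import pandas as pd

df = pd.DataFrame({"KI": ["00112233445566778899AABBCCDDEEFF"], "EKI": [""]})
# self.apply_function(df, dest="EKI", src="KI", function=self.generate_eki)
# -> "EKI" exists, so it is rewritten with generate_eki applied row by row;
#    if "EKI" were missing, the frame would pass through unchanged.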
@@ -126,7 +128,9 @@ def apply_functions(self, df: pd.DataFrame) -> pd.DataFrame:
df["ADM6"] = df["ADM6"].apply(lambda _: self.generate_code("ADM6", 8))

df["KI"] = df["KI"].apply(lambda _: self.data_generator.generate_ki())
df["ACC"] = df["IMSI"].apply(lambda imsi: self.dep_data_generator.calculate_acc(imsi=str(imsi)))
df["ACC"] = df["IMSI"].apply(
lambda imsi: self.dep_data_generator.calculate_acc(imsi=str(imsi))
)

# Apply EKI / OPC
self.apply_function(df, "EKI", "KI", self.generate_eki)
@@ -137,6 +141,8 @@ def apply_functions(self, df: pd.DataFrame) -> pd.DataFrame:
        for key in ["KIC", "KID", "KIK"]:
            col = f"{key}{i}"
            if col in df.columns:
                df[col] = df["KI"].apply(lambda _: self.data_generator.generate_otas())
                df[col] = df["KI"].apply(
                    lambda _: self.data_generator.generate_otas()
                )

        return df
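
Taken together, apply_functions fills the subscriber-key columns in place and returns the frame; the OTA index i in col = f"{key}{i}" is presumably bound by an enclosing loop that falls outside this hunk. A hypothetical input frame naming only the columns the method touches:

# Hypothetical input: column names mirror those touched by apply_functions.
import pandas as pd

df = pd.DataFrame({
    "IMSI": ["001010123456789"],  # placeholder test IMSI
    "KI": [""], "ADM6": [""], "EKI": [""], "OPC": [""],
    "KIC1": [""], "KID1": [""], "KIK1": [""],  # OTA keys for index 1
})
# df = executor.apply_functions(df)  # executor: an instance of this class (assumed);
#                                    # fills KI/ACC/EKI/OPC and present KIC/KID/KIK columns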