7 | 7 | "metadata": {},
8 | 8 | "outputs": [],
9 | 9 | "source": [
10 | | - "from pathlib import Path\n",
11 | 10 | "import json\n",
12 | 11 | "import re\n",
13 | 12 | "import unittest\n",
| 13 | + "from pathlib import Path\n",
| 14 | + "from platform import python_version\n",
| 15 | + "\n",
14 | 16 | "import tensorflow as tf\n",
15 | | - "import tensorboard\n",
16 | 17 | "import tf2onnx\n",
17 | | - "from platform import python_version\n",
| 18 | + "\n",
18 | 19 | "\n",
19 | 20 | "def get_major_minor(s):\n",
20 | | - "    return '.'.join(s.split('.')[:2])\n",
| 21 | + "    return \".\".join(s.split(\".\")[:2])\n",
| 22 | + "\n",
21 | 23 | "\n",
22 | 24 | "def load_expected_versions() -> dict:\n",
23 | | - "    lock_file = Path('./expected_versions.json')\n",
| 25 | + "    lock_file = Path(\"./expected_versions.json\")\n",
24 | 26 | "    data = {}\n",
25 | 27 | "\n",
26 | | - "    with open(lock_file, 'r') as file:\n",
| 28 | + "    with open(lock_file, \"r\") as file:\n",
27 | 29 | "        data = json.load(file)\n",
28 | 30 | "\n",
29 | | - "    return data \n",
| 31 | + "    return data\n",
| 32 | + "\n",
30 | 33 | "\n",
31 | 34 | "def get_expected_version(dependency_name: str) -> str:\n",
32 | 35 | "    raw_value = expected_versions.get(dependency_name)\n",
33 | | - "    raw_version = re.sub(r'^\\D+', '', raw_value)\n",
34 | | - "    return get_major_minor(raw_version) \n",
| 36 | + "    raw_version = re.sub(r\"^\\D+\", \"\", raw_value)\n",
| 37 | + "    return get_major_minor(raw_version)\n",
| 38 | + "\n",
35 | 39 | "\n",
36 | 40 | "class TestTensorflowNotebook(unittest.TestCase):\n",
37 | | - "    \n",
| 41 | + "\n",
38 | 42 | "    def test_python_version(self):\n",
39 | | - "        expected_major_minor = get_expected_version('Python')\n",
40 | | - "        actual_major_minor = get_major_minor(python_version()) \n",
| 43 | + "        expected_major_minor = get_expected_version(\"Python\")\n",
| 44 | + "        actual_major_minor = get_major_minor(python_version())\n",
41 | 45 | "        self.assertEqual(actual_major_minor, expected_major_minor, \"incorrect version\")\n",
42 | | - "    \n",
| 46 | + "\n",
43 | 47 | "    def test_tensorflow_version(self):\n",
44 | | - "        expected_major_minor = get_expected_version('ROCm-TensorFlow')\n",
45 | | - "        actual_major_minor = get_major_minor(tf.__version__) \n",
| 48 | + "        expected_major_minor = get_expected_version(\"ROCm-TensorFlow\")\n",
| 49 | + "        actual_major_minor = get_major_minor(tf.__version__)\n",
46 | 50 | "        self.assertEqual(actual_major_minor, expected_major_minor, \"incorrect version\")\n",
47 | | - "    \n",
| 51 | + "\n",
48 | 52 | "    def test_tf2onnx_conversion(self):\n",
49 | 53 | "        # Replace this with an actual TensorFlow model conversion using tf2onnx\n",
50 | 54 | "        model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(10,))])\n",
51 | 55 | "        onnx_model = tf2onnx.convert.from_keras(model)\n",
52 | | - "        \n",
| 56 | + "\n",
53 | 57 | "        self.assertTrue(onnx_model is not None)\n",
54 | 58 | "\n",
55 | 59 | "    def test_mnist_model(self):\n",
|
|
59 | 63 | "        x_train, x_test = x_train / 255.0, x_test / 255.0\n",
60 | 64 | "        model = tf.keras.models.Sequential([\n",
61 | 65 | "            tf.keras.layers.Flatten(input_shape=(28, 28)),\n",
62 | | - "            tf.keras.layers.Dense(128, activation='relu'),\n",
| 66 | + "            tf.keras.layers.Dense(128, activation=\"relu\"),\n",
63 | 67 | "            tf.keras.layers.Dropout(0.2),\n",
64 | 68 | "            tf.keras.layers.Dense(10)\n",
65 | 69 | "        ])\n",
66 | 70 | "        predictions = model(x_train[:1]).numpy()\n",
67 | | - "        predictions\n",
| 71 | + "        assert predictions is not None\n",
68 | 72 | "        tf.nn.softmax(predictions).numpy()\n",
69 | 73 | "        loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)\n",
70 | 74 | "        loss_fn(y_train[:1], predictions).numpy()\n",
71 | | - "        model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])\n",
| 75 | + "        model.compile(optimizer=\"adam\", loss=loss_fn, metrics=[\"accuracy\"])\n",
72 | 76 | "        model.fit(x_train, y_train, epochs=5)\n",
73 | | - "        model.evaluate(x_test, y_test, verbose=2)\n",
| 77 | + "        model.evaluate(x_test, y_test, verbose=2)\n",
74 | 78 | "        probability_model = tf.keras.Sequential([\n",
75 | 79 | "            model,\n",
76 | 80 | "            tf.keras.layers.Softmax()\n",
77 | 81 | "        ])\n",
78 | 82 | "        probability_model(x_test[:5])\n",
79 | 83 | "\n",
80 | 84 | "    def test_tensorboard(self):\n",
| 85 | + "        # Check tensorboard is installed\n",
| 86 | + "        import tensorboard as _  # noqa: PLC0415, F401\n",
| 87 | + "\n",
81 | 88 | "        # Create a simple model\n",
82 | 89 | "        model = tf.keras.Sequential([\n",
83 | | - "            tf.keras.layers.Dense(10, input_shape=(5,), activation='relu'),\n",
| 90 | + "            tf.keras.layers.Dense(10, input_shape=(5,), activation=\"relu\"),\n",
84 | 91 | "            tf.keras.layers.Dense(1)\n",
85 | 92 | "        ])\n",
86 | 93 | "        # Compile the model\n",
87 | | - "        model.compile(optimizer='adam', loss='mse')\n",
| 94 | + "        model.compile(optimizer=\"adam\", loss=\"mse\")\n",
88 | 95 | "        # Generate some example data\n",
89 | 96 | "        x_train = tf.random.normal((100, 5))\n",
90 | 97 | "        y_train = tf.random.normal((100, 1))\n",
91 | 98 | "        # Create a TensorBoard callback\n",
92 | | - "        log_dir = './logs'\n",
| 99 | + "        log_dir = \"./logs\"\n",
93 | 100 | "        tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir=log_dir)\n",
94 | 101 | "        # Train the model\n",
95 | 102 | "        model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])\n",
96 | 103 | "\n",
| 104 | + "\n",
97 | 105 | "expected_versions = load_expected_versions()\n",
98 | 106 | "\n",
99 | 107 | "suite = unittest.TestLoader().loadTestsFromTestCase(TestTensorflowNotebook)\n",
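
The hunk ends right after the test suite is assembled, so the runner invocation itself is outside the lines shown. For reference only, a minimal sketch of the usual in-notebook pattern, assuming the cell goes on to execute the `suite` it just built:

```python
# Sketch only: conventional way to run a suite built with
# unittest.TestLoader().loadTestsFromTestCase(...). Not part of this diff.
import unittest

runner = unittest.TextTestRunner(verbosity=2)  # verbosity=2 prints one status line per test
result = runner.run(suite)                     # `suite` comes from the cell shown above
assert result.wasSuccessful(), "notebook tests failed"
```

Asserting on `result.wasSuccessful()` makes the cell itself raise when any test fails, which is what lets automated notebook execution surface the failure.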