97  |  97 | },
98  |  98 | "outputs": [],
99  |  99 | "source": [
100 |     | - "!pip install git+https://github.com/tensorflow/examples.git"
    | 100 | + "!pip install git+https://github.com/tensorflow/examples.git\n",
    | 101 | + "!pip install -U keras\n",
    | 102 | + "!pip install -q tensorflow_datasets\n",
    | 103 | + "!pip install -q -U tensorflow-text tensorflow"
101 | 104 | ]
102 | 105 | },
103 | 106 | {

108 | 111 | },
109 | 112 | "outputs": [],
110 | 113 | "source": [
111 |     | - "import tensorflow as tf\n",
    | 114 | + "import numpy as np\n",
112 | 115 | "\n",
    | 116 | + "import tensorflow as tf\n",
113 | 117 | "import tensorflow_datasets as tfds"
114 | 118 | ]
115 | 119 | },

252 | 256 | "    # both use the same seed, so they'll make the same random changes.\n",
253 | 257 | "    self.augment_inputs = tf.keras.layers.RandomFlip(mode=\"horizontal\", seed=seed)\n",
254 | 258 | "    self.augment_labels = tf.keras.layers.RandomFlip(mode=\"horizontal\", seed=seed)\n",
255 |     | - "  \n",
    | 259 | + "\n",
256 | 260 | "  def call(self, inputs, labels):\n",
257 | 261 | "    inputs = self.augment_inputs(inputs)\n",
258 | 262 | "    labels = self.augment_labels(labels)\n",

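For context, the `Augment` layer this hunk edits pairs two identically seeded `RandomFlip` layers so that an image and its segmentation mask receive the same random transformation. A minimal sketch of the whole layer follows; the `__init__` signature and the trailing `return` are assumptions, since only the constructor tail and the `call` body appear in the diff:

```python
import tensorflow as tf

class Augment(tf.keras.layers.Layer):
  def __init__(self, seed=42):
    super().__init__()
    # Both layers use the same seed, so a flip applied to an image
    # is mirrored by an identical flip applied to its mask.
    self.augment_inputs = tf.keras.layers.RandomFlip(mode="horizontal", seed=seed)
    self.augment_labels = tf.keras.layers.RandomFlip(mode="horizontal", seed=seed)

  def call(self, inputs, labels):
    inputs = self.augment_inputs(inputs)
    labels = self.augment_labels(labels)
    return inputs, labels
```

Sharing the seed is what keeps inputs and labels aligned; two independently seeded layers would flip them independently and corrupt the supervision.
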
450 | 454 | "source": [
451 | 455 | "## Train the model\n",
452 | 456 | "\n",
453 |     | - "Now, all that is left to do is to compile and train the model. \n",
    | 457 | + "Now, all that is left to do is to compile and train the model.\n",
454 | 458 | "\n",
455 | 459 | "Since this is a multiclass classification problem, use the `tf.keras.losses.SparseCategoricalCrossentropy` loss function with the `from_logits` argument set to `True`, since the labels are scalar integers instead of vectors of scores for each pixel of every class.\n",
456 | 460 | "\n",

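A minimal sketch of the compile step that markdown cell describes; the optimizer and metric here are assumptions, since the tutorial's actual compile cell sits outside this hunk:

```python
# Sketch only: optimizer and metrics are assumed, not taken from the diff.
model.compile(
    optimizer="adam",
    loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    metrics=["accuracy"],
)
```
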
490 | 494 | },
491 | 495 | "outputs": [],
492 | 496 | "source": [
493 |     | - "tf.keras.utils.plot_model(model, show_shapes=True)"
    | 497 | + "tf.keras.utils.plot_model(model, show_shapes=True, expand_nested=True, dpi=64)"
494 | 498 | ]
495 | 499 | },
496 | 500 | {

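Both new arguments are standard `tf.keras.utils.plot_model` options: `expand_nested=True` draws the layers inside nested models (such as a wrapped encoder) rather than collapsing them into a single box, and `dpi=64` keeps the rendered diagram compact.
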
695 | 699 | },
696 | 700 | "outputs": [],
697 | 701 | "source": [
698 |     | - "label = [0,0]\n",
699 |     | - "prediction = [[-3., 0], [-3, 0]] \n",
700 |     | - "sample_weight = [1, 10] \n",
    | 702 | + "label = np.array([0,0])\n",
    | 703 | + "prediction = np.array([[-3., 0], [-3, 0]])\n",
    | 704 | + "sample_weight = [1, 10]\n",
701 | 705 | "\n",
702 |     | - "loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True,\n",
703 |     | - "                                                     reduction=tf.keras.losses.Reduction.NONE)\n",
    | 706 | + "loss = tf.keras.losses.SparseCategoricalCrossentropy(\n",
    | 707 | + "    from_logits=True,\n",
    | 708 | + "    reduction=tf.keras.losses.Reduction.NONE\n",
    | 709 | + ")\n",
704 | 710 | "loss(label, prediction, sample_weight).numpy()"
705 | 711 | ]
706 | 712 | },

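The rewrite in this hunk is mechanical (NumPy arrays to match the new `import numpy as np`, trailing whitespace removed, the constructor reflowed), so the cell's output does not change. As a sanity check on what it prints: with logits `[-3, 0]` and true class `0`, the per-example cross-entropy is `-log(softmax(prediction)[0]) = log(1 + e^3) ≈ 3.0486`, so the sample weights `[1, 10]` scale the two losses to roughly `[3.05, 30.49]`:

```python
import numpy as np

# Per-example loss for logits [-3, 0] with true class 0:
# -log(e^-3 / (e^-3 + e^0)) = log(1 + e^3)
per_example = np.log(1 + np.exp(3.0))
print(per_example * np.array([1, 10]))  # ~[ 3.0486  30.4859]
```
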
729 | 735 | "  class_weights = tf.constant([2.0, 2.0, 1.0])\n",
730 | 736 | "  class_weights = class_weights/tf.reduce_sum(class_weights)\n",
731 | 737 | "\n",
732 |     | - "  # Create an image of `sample_weights` by using the label at each pixel as an \n",
    | 738 | + "  # Create an image of `sample_weights` by using the label at each pixel as an\n",
733 | 739 | "  # index into the `class weights` .\n",
734 | 740 | "  sample_weights = tf.gather(class_weights, indices=tf.cast(label, tf.int32))\n",
735 | 741 | "\n",

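For readers skimming the diff, the `tf.gather` line is the per-pixel lookup: each integer label indexes into the normalized `class_weights` vector, yielding a weight map with the same shape as the label image. A small standalone check, using an illustrative toy label rather than anything from the tutorial's data:

```python
import tensorflow as tf

class_weights = tf.constant([2.0, 2.0, 1.0])
class_weights = class_weights / tf.reduce_sum(class_weights)  # [0.4, 0.4, 0.2]

# Toy 2x2 label "image"; the values are illustrative only.
label = tf.constant([[0, 1], [2, 1]])

# Each pixel's class id picks the matching entry from `class_weights`.
sample_weights = tf.gather(class_weights, indices=tf.cast(label, tf.int32))
print(sample_weights)  # [[0.4 0.4]
                       #  [0.2 0.4]]
```
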
811 | 817 | "metadata": {
812 | 818 | "accelerator": "GPU",
813 | 819 | "colab": {
814 |     | - "collapsed_sections": [],
815 | 820 | "name": "segmentation.ipynb",
816 | 821 | "toc_visible": true
817 | 822 | },