#! /usr/bin/python
# -*- coding: utf-8 -*-



import tensorflow as tf

def identity(x, name=None):
    """The identity activation function. Shortcut is ``linear``.

    Parameters
    ----------
    x : a tensor input
        input(s)

    Returns
    -------
    A `Tensor` with the same type as `x`.
    """
    return x

# Shortcut
linear = identity
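
# A minimal usage sketch (an illustration, not part of the original file;
# ``tl.layers.DenseLayer`` and the ``tl.act`` alias are taken from the Examples
# elsewhere in this file): use ``identity``/``linear`` as the ``act`` argument
# of a layer when no non-linearity should be applied.
# >>> network = tl.layers.DenseLayer(network, n_units=100, name='dense_linear',
# ...                                act=tl.act.identity)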

def ramp(x=None, v_min=0, v_max=1, name=None):
    """The ramp activation function.

    Parameters
    ----------
    x : a tensor input
        input(s)
    v_min : float
        inputs smaller than ``v_min`` are clipped to ``v_min``
    v_max : float
        inputs greater than ``v_max`` are clipped to ``v_max``
    name : a string or None
        An optional name to attach to this activation function.

    Returns
    -------
    A `Tensor` with the same type as `x`.
    """
    return tf.clip_by_value(x, clip_value_min=v_min, clip_value_max=v_max, name=name)
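
# A minimal usage sketch (an illustration with made-up values, not part of the
# original file): ramp() clips every element of the input into [v_min, v_max].
# >>> x = tf.constant([-2.0, 0.5, 3.0])
# >>> y = ramp(x, v_min=0.0, v_max=1.0)   # evaluates to [0.0, 0.5, 1.0]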

def leaky_relu(x=None, alpha=0.1, name="LeakyReLU"):
    """The LeakyReLU. Shortcut is ``lrelu``.

    Modified version of ReLU, introducing a nonzero gradient for negative
    input.

    Parameters
    ----------
    x : A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
        `int16`, or `int8`.
    alpha : `float`
        Slope of the function for negative input.
    name : a string or None
        An optional name to attach to this activation function.

    Examples
    --------
    >>> network = tl.layers.DenseLayer(network, n_units=100, name='dense_lrelu',
    ...                                act=lambda x: tl.act.lrelu(x, 0.2))

    References
    ----------
    - `Rectifier Nonlinearities Improve Neural Network Acoustic Models, Maas et al. (2013) <http://web.stanford.edu/~awni/papers/relu_hybrid_icml2013_final.pdf>`_
    """
    with tf.name_scope(name) as scope:
        # x = tf.nn.relu(x)
        # m_x = tf.nn.relu(-x)
        # x -= alpha * m_x
        x = tf.maximum(x, alpha * x)
    return x

# Shortcut
lrelu = leaky_relu
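
# A minimal usage sketch (an illustration with made-up values, not part of the
# original file): for alpha in (0, 1), tf.maximum(x, alpha * x) keeps positive
# inputs unchanged and scales negative inputs by alpha.
# >>> y = lrelu(tf.constant([-1.0, 2.0]), alpha=0.2)   # evaluates to [-0.2, 2.0]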

def pixel_wise_softmax(output, name='pixel_wise_softmax'):
    """Return the softmax outputs of images; every pixel has multiple labels,
    and the label probabilities of each pixel sum to 1.
    Usually used for image segmentation.

    Parameters
    ----------
    output : tensor
        - For 2d image, 4D tensor [batch_size, height, width, channel], channel >= 2.
        - For 3d image, 5D tensor [batch_size, depth, height, width, channel], channel >= 2.

    Examples
    --------
    >>> outputs = pixel_wise_softmax(network.outputs)
    >>> dice_loss = 1 - dice_coe(outputs, y_, epsilon=1e-5)

    References
    ----------
    - `tf.reverse <https://www.tensorflow.org/versions/master/api_docs/python/array_ops.html#reverse>`_
    """
    with tf.name_scope(name) as scope:
        # softmax over the last (channel) dimension, i.e. independently for each pixel
        return tf.nn.softmax(output)
    ## old implementation
    # exp_map = tf.exp(output)
    # if output.get_shape().ndims == 4:   # 2d image
    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, True]))
    # elif output.get_shape().ndims == 5: # 3d image
    #     evidence = tf.add(exp_map, tf.reverse(exp_map, [False, False, False, False, True]))
    # else:
    #     raise Exception("output parameters should be 2d or 3d image, not %s" % str(output._shape))
    # return tf.div(exp_map, evidence)
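
# A minimal usage sketch (an illustration, not part of the original file; the
# placeholder shape is arbitrary): tf.nn.softmax acts on the last (channel)
# axis, so the class probabilities of each pixel sum to 1.
# >>> logits = tf.placeholder(tf.float32, [None, 256, 256, 2])
# >>> probs = pixel_wise_softmax(logits)   # same shape; channels sum to 1 per pixel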