#! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorflow as tf
import time
from . import visualize
from . import utils
from . import files
from . import cost
from . import iterate
import numpy as np
from six.moves import xrange
import random
import warnings
# __all__ = [
# "Layer",
# "DenseLayer",
# ]
# set_keep = locals()
set_keep = globals()
set_keep['_layers_name_list'] =[]
set_keep['name_reuse'] = False
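# Note: because ``set_keep`` is bound to ``globals()``, the two entries above are
# also visible as the module-level names ``_layers_name_list`` and ``name_reuse``
# (which is how ``Layer.__init__`` checks ``name_reuse`` directly).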
## Variable Operation
def flatten_reshape(variable, name=''):
"""Reshapes high-dimension input to a vector.
[batch_size, mask_row, mask_col, n_mask] ---> [batch_size, mask_row * mask_col * n_mask]
Parameters
----------
variable : a tensorflow variable
name : a string or None
An optional name to attach to this layer.
Examples
--------
>>> W_conv2 = weight_variable([5, 5, 100, 32]) # 32 features for each 5x5 patch
>>> b_conv2 = bias_variable([32])
>>> W_fc1 = weight_variable([7 * 7 * 32, 256])
>>> h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
>>> h_pool2 = max_pool_2x2(h_conv2)
>>> h_pool2.get_shape()[:].as_list() = [batch_size, 7, 7, 32]
... [batch_size, mask_row, mask_col, n_mask]
>>> h_pool2_flat = tl.layers.flatten_reshape(h_pool2)
... [batch_size, mask_row * mask_col * n_mask]
>>> h_pool2_flat_drop = tf.nn.dropout(h_pool2_flat, keep_prob)
...
"""
dim = 1
for d in variable.get_shape()[1:].as_list():
dim *= d
return tf.reshape(variable, shape=[-1, dim], name=name)
def clear_layers_name():
"""Clear all layer names in set_keep['_layers_name_list'],
enable layer name reuse.
Examples
---------
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.DenseLayer(network, n_units=800, name='relu1')
...
>>> tl.layers.clear_layers_name()
>>> network2 = tl.layers.InputLayer(x, name='input_layer')
>>> network2 = tl.layers.DenseLayer(network2, n_units=800, name='relu1')
...
"""
set_keep['_layers_name_list'] =[]
def set_name_reuse(enable=True):
"""Enable or disable reuse layer name. By default, each layer must has unique
name. When you want two or more input placeholder (inference) share the same
model parameters, you need to enable layer name reuse, then allow the
parameters have same name scope.
Parameters
------------
enable : boolean, enable name reuse.
Examples
------------
>>> def embed_seq(input_seqs, is_train, reuse):
>>> with tf.variable_scope("model", reuse=reuse):
>>> tl.layers.set_name_reuse(reuse)
>>> network = tl.layers.EmbeddingInputlayer(
... inputs = input_seqs,
... vocabulary_size = vocab_size,
... embedding_size = embedding_size,
... name = 'e_embedding')
>>> network = tl.layers.DynamicRNNLayer(network,
... cell_fn = tf.nn.rnn_cell.BasicLSTMCell,
... n_hidden = embedding_size,
... dropout = (0.7 if is_train else None),
... initializer = w_init,
... sequence_length = tl.layers.retrieve_seq_length_op2(input_seqs),
... return_last = True,
... name = 'e_dynamicrnn',)
>>> return network
>>>
>>> net_train = embed_seq(t_caption, is_train=True, reuse=False)
>>> net_test = embed_seq(t_caption, is_train=False, reuse=True)
- see ``tutorial_ptb_lstm.py`` for example.
"""
set_keep['name_reuse'] = enable
def initialize_rnn_state(state):
"""Return the initialized RNN state.
The input is an LSTMStateTuple or a State of RNNCells.
Parameters
-----------
state : a RNN state.
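Examples
---------
- A minimal sketch, assuming ``network`` is an RNN layer that exposes an
``initial_state`` and a default session is active:
>>> state = tl.layers.initialize_rnn_state(network.initial_state)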
"""
if isinstance(state, tf.nn.rnn_cell.LSTMStateTuple):
c = state.c.eval()
h = state.h.eval()
return (c, h)
else:
new_state = state.eval()
return new_state
def print_all_variables(train_only=False):
"""Print all trainable and non-trainable variables
without initialize_all_variables()
Parameters
----------
train_only : boolean
If True, only print the trainable variables, otherwise, print all variables.
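Examples
---------
- A minimal sketch (run after the graph has been defined):
>>> tl.layers.print_all_variables(train_only=True)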
"""
tvar = tf.trainable_variables() if train_only else tf.all_variables()
for idx, v in enumerate(tvar):
print(" var {:3}: {:15} {}".format(idx, str(v.get_shape()), v.name))
def get_variables_with_name(name, train_only=True, printable=False):
"""Get variable list by a given name scope.
Examples
---------
>>> dense_vars = get_variables_with_name('dense', True, True)
"""
print(" Get variables with %s" % name)
t_vars = tf.trainable_variables() if train_only else tf.all_variables()
d_vars = [var for var in t_vars if name in var.name]
if printable:
for idx, v in enumerate(d_vars):
print(" got {:3}: {:15} {}".format(idx, v.name, str(v.get_shape())))
return d_vars
def list_remove_repeat(l=None):
"""Remove the repeated items in a list, and return the processed list.
You may need it when creating merged layers such as Concat, Elementwise, etc.
Parameters
----------
l : a list
Examples
---------
>>> l = [2, 3, 4, 2, 3]
>>> l = list_remove_repeat(l)
... [2, 3, 4]
"""
l2 = []
[l2.append(i) for i in l if i not in l2]
return l2
## Basic layer
class Layer(object):
"""
The :class:`Layer` class represents a single layer of a neural network. It
should be subclassed when implementing new types of layers.
Because each layer can keep track of the layer(s) feeding into it, a
network's output :class:`Layer` instance can double as a handle to the full
network.
Parameters
----------
inputs : a :class:`Layer` instance
The `Layer` class feeding into this layer.
name : a string or None
An optional name to attach to this layer.
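Examples
--------
- A minimal sketch of a custom layer (the ``DoubleLayer`` name is illustrative;
it simply doubles the output of the previous layer, following the same
bookkeeping pattern as ``DenseLayer``):
>>> class DoubleLayer(Layer):
>>>     def __init__(self, layer=None, name='double_layer'):
>>>         Layer.__init__(self, name=name)
>>>         self.inputs = layer.outputs
>>>         self.outputs = self.inputs * 2
>>>         self.all_layers = list(layer.all_layers)
>>>         self.all_params = list(layer.all_params)
>>>         self.all_drop = dict(layer.all_drop)
>>>         self.all_layers.extend([self.outputs])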
"""
def __init__(
self,
inputs = None,
name ='layer'
):
self.inputs = inputs
if (name in set_keep['_layers_name_list']) and name_reuse == False:
raise Exception("Layer '%s' already exists, please choice other 'name'.\
\nHint : Use different name for different 'Layer' (The name is used to control parameter sharing)" % name)
else:
self.name = name
if name not in ['', None, False]:
set_keep['_layers_name_list'].append(name)
def print_params(self, details=True):
''' Print all info of parameters in the network'''
for i, p in enumerate(self.all_params):
if details:
try:
print(" param {:3}: {:15} (mean: {:<18}, median: {:<18}, std: {:<18}) {}".format(i, str(p.eval().shape), p.eval().mean(), np.median(p.eval()), p.eval().std(), p.name))
except:
raise Exception("Hint: print params details after sess.run(tf.initialize_all_variables()) or use network.print_params(False).")
else:
print(" param {:3}: {:15} {}".format(i, str(p.get_shape()), p.name))
print(" num of params: %d" % self.count_params())
def print_layers(self):
''' Print all info of layers in the network '''
for i, p in enumerate(self.all_layers):
print(" layer %d: %s" % (i, str(p)))
def count_params(self):
''' Return the number of parameters in the network '''
n_params = 0
for i, p in enumerate(self.all_params):
n = 1
# for s in p.eval().shape:
for s in p.get_shape():
try:
s = int(s)
except:
s = 1
if s:
n = n * s
n_params = n_params + n
return n_params
def __str__(self):
print("\nIt is a Layer class")
self.print_params(False)
self.print_layers()
return " Last layer is: %s" % self.__class__.__name__
## Input layer
class InputLayer(Layer):
"""
The :class:`InputLayer` class is the starting layer of a neural network.
Parameters
----------
inputs : a TensorFlow placeholder
The input tensor data.
name : a string or None
An optional name to attach to this layer.
n_features : an int
The number of features. If not specified, the input is assumed to have
the shape [batch_size, n_features], and the second element is taken as
n_features. It is used to specify the matrix size of the
next layer. If a convolutional layer follows the InputLayer,
n_features is not important.
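Examples
--------
- A minimal sketch (the placeholder shape is illustrative):
>>> x = tf.placeholder(tf.float32, shape=[None, 784])
>>> network = tl.layers.InputLayer(x, name='input_layer')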
"""
def __init__(
self,
inputs = None,
n_features = None,
name ='input_layer'
):
Layer.__init__(self, inputs=inputs, name=name)
print(" tensorlayer:Instantiate InputLayer %s: %s" % (self.name, inputs._shape))
self.outputs = inputs
self.all_layers = []
self.all_params = []
self.all_drop = {}
## Word Embedding Input layer
class Word2vecEmbeddingInputlayer(Layer):
"""
The :class:`Word2vecEmbeddingInputlayer` class is a fully connected layer,
for Word Embedding. Words are input as integer indices.
The output is the embedded word vector.
Parameters
----------
inputs : placeholder
For word inputs. integer index format.
train_labels : placeholder
For word labels. integer index format.
vocabulary_size : int
The size of vocabulary, number of words.
embedding_size : int
The number of embedding dimensions.
num_sampled : int
The number of negative examples for NCE loss.
nce_loss_args : a dictionary
The arguments for tf.nn.nce_loss()
E_init : embedding initializer
The initializer for initializing the embedding matrix.
E_init_args : a dictionary
The arguments for embedding initializer
nce_W_init : NCE decoder weights initializer
The initializer for initializing the nce decoder weight matrix.
nce_W_init_args : a dictionary
The arguments for initializing the nce decoder weight matrix.
nce_b_init : NCE decoder biases initializer
The initializer for tf.get_variable() of the nce decoder bias vector.
nce_b_init_args : a dictionary
The arguments for tf.get_variable() of the nce decoder bias vector.
name : a string or None
An optional name to attach to this layer.
Variables
--------------
nce_cost : a tensor
The NCE loss.
outputs : a tensor
The outputs of embedding layer.
normalized_embeddings : tensor
Normalized embedding matrix
Examples
--------
- Without TensorLayer : see tensorflow/examples/tutorials/word2vec/word2vec_basic.py
>>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
>>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
>>> embeddings = tf.Variable(
... tf.random_uniform([vocabulary_size, embedding_size], -1.0, 1.0))
>>> embed = tf.nn.embedding_lookup(embeddings, train_inputs)
>>> nce_weights = tf.Variable(
... tf.truncated_normal([vocabulary_size, embedding_size],
... stddev=1.0 / math.sqrt(embedding_size)))
>>> nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
>>> cost = tf.reduce_mean(
... tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
... inputs=embed, labels=train_labels,
... num_sampled=num_sampled, num_classes=vocabulary_size,
... num_true=1))
- With TensorLayer : see tutorial_word2vec_basic.py
>>> train_inputs = tf.placeholder(tf.int32, shape=[batch_size])
>>> train_labels = tf.placeholder(tf.int32, shape=[batch_size, 1])
>>> emb_net = tl.layers.Word2vecEmbeddingInputlayer(
... inputs = train_inputs,
... train_labels = train_labels,
... vocabulary_size = vocabulary_size,
... embedding_size = embedding_size,
... num_sampled = num_sampled,
... nce_loss_args = {},
... E_init = tf.random_uniform,
... E_init_args = {'minval':-1.0, 'maxval':1.0},
... nce_W_init = tf.truncated_normal,
... nce_W_init_args = {'stddev': float(1.0/np.sqrt(embedding_size))},
... nce_b_init = tf.zeros,
... nce_b_init_args = {},
... name ='word2vec_layer',
... )
>>> cost = emb_net.nce_cost
>>> train_params = emb_net.all_params
>>> train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(
... cost, var_list=train_params)
>>> normalized_embeddings = emb_net.normalized_embeddings
References
----------
- `tensorflow/examples/tutorials/word2vec/word2vec_basic.py </~https://github.com/tensorflow/tensorflow/blob/r0.7/tensorflow/examples/tutorials/word2vec/word2vec_basic.py>`_
"""
def __init__(
self,
inputs = None,
train_labels = None,
vocabulary_size = 80000,
embedding_size = 200,
num_sampled = 64,
nce_loss_args = {},
E_init = tf.random_uniform_initializer(minval=-1.0, maxval=1.0),
E_init_args = {},
nce_W_init = tf.truncated_normal_initializer(stddev=0.03),
nce_W_init_args = {},
nce_b_init = tf.constant_initializer(value=0.0),
nce_b_init_args = {},
name ='word2vec_layer',
):
Layer.__init__(self, name=name)
self.inputs = inputs
print(" tensorlayer:Instantiate Word2vecEmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
# Look up embeddings for inputs.
# Note: a row of 'embeddings' is the vector representation of a word.
# For the sake of speed, it is better to slice the embedding matrix
# instead of converting a word id to a one-hot vector and then
# multiplying it by the embedding matrix.
# embed is the output of the hidden layer (embedding layer); each row is a
# vector with 'embedding_size' values.
with tf.variable_scope(name) as vs:
embeddings = tf.get_variable(name='embeddings',
shape=(vocabulary_size, embedding_size),
initializer=E_init,
**E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
# Construct the variables for the NCE loss (i.e. negative sampling)
nce_weights = tf.get_variable(name='nce_weights',
shape=(vocabulary_size, embedding_size),
initializer=nce_W_init,
**nce_W_init_args)
nce_biases = tf.get_variable(name='nce_biases',
shape=(vocabulary_size),
initializer=nce_b_init,
**nce_b_init_args)
# Compute the average NCE loss for the batch.
# tf.nce_loss automatically draws a new sample of the negative labels
# each time we evaluate the loss.
self.nce_cost = tf.reduce_mean(
tf.nn.nce_loss(weights=nce_weights, biases=nce_biases,
inputs=embed, labels=train_labels,
num_sampled=num_sampled, num_classes=vocabulary_size,
**nce_loss_args))
self.outputs = embed
self.normalized_embeddings = tf.nn.l2_normalize(embeddings, 1)
self.all_layers = [self.outputs]
self.all_params = [embeddings, nce_weights, nce_biases]
self.all_drop = {}
class EmbeddingInputlayer(Layer):
"""
The :class:`EmbeddingInputlayer` class is a fully connected layer,
for Word Embedding. Words are input as integer indices.
The output is the embedded word vector.
This class cannot be used to train a word embedding matrix, so you should
assign a trained matrix to it. To train a word embedding matrix, you can use
:class:`Word2vecEmbeddingInputlayer`.
Note that this embedding matrix should not be updated.
Parameters
----------
inputs : placeholder
For word inputs. integer index format.
a 2D tensor : [batch_size, num_steps(num_words)]
vocabulary_size : int
The size of vocabulary, number of words.
embedding_size : int
The number of embedding dimensions.
E_init : embedding initializer
The initializer for initializing the embedding matrix.
E_init_args : a dictionary
The arguments for embedding initializer
name : a string or None
An optional name to attach to this layer.
Variables
------------
outputs : a tensor
The outputs of embedding layer.
the outputs 3D tensor : [batch_size, num_steps(num_words), embedding_size]
Examples
--------
>>> vocabulary_size = 50000
>>> embedding_size = 200
>>> model_file_name = "model_word2vec_50k_200"
>>> batch_size = None
...
>>> all_var = tl.files.load_npy_to_any(name=model_file_name+'.npy')
>>> data = all_var['data']; count = all_var['count']
>>> dictionary = all_var['dictionary']
>>> reverse_dictionary = all_var['reverse_dictionary']
>>> tl.files.save_vocab(count, name='vocab_'+model_file_name+'.txt')
>>> del all_var, data, count
...
>>> load_params = tl.files.load_npz(name=model_file_name+'.npz')
>>> x = tf.placeholder(tf.int32, shape=[batch_size])
>>> y_ = tf.placeholder(tf.int32, shape=[batch_size, 1])
>>> emb_net = tl.layers.EmbeddingInputlayer(
... inputs = x,
... vocabulary_size = vocabulary_size,
... embedding_size = embedding_size,
... name ='embedding_layer')
>>> sess.run(tf.initialize_all_variables())
>>> tl.files.assign_params(sess, [load_params[0]], emb_net)
>>> word = b'hello'
>>> word_id = dictionary[word]
>>> print('word_id:', word_id)
... 6428
...
>>> words = [b'i', b'am', b'hao', b'dong']
>>> word_ids = tl.files.words_to_word_ids(words, dictionary)
>>> context = tl.files.word_ids_to_words(word_ids, reverse_dictionary)
>>> print('word_ids:', word_ids)
... [72, 1226, 46744, 20048]
>>> print('context:', context)
... [b'i', b'am', b'hao', b'dong']
...
>>> vector = sess.run(emb_net.outputs, feed_dict={x : [word_id]})
>>> print('vector:', vector.shape)
... (1, 200)
>>> vectors = sess.run(emb_net.outputs, feed_dict={x : word_ids})
>>> print('vectors:', vectors.shape)
... (4, 200)
"""
def __init__(
self,
inputs = None,
vocabulary_size = 80000,
embedding_size = 200,
E_init = tf.random_uniform_initializer(-0.1, 0.1),
E_init_args = {},
name ='embedding_layer',
):
Layer.__init__(self, name=name)
self.inputs = inputs
print(" tensorlayer:Instantiate EmbeddingInputlayer %s: (%d, %d)" % (self.name, vocabulary_size, embedding_size))
with tf.variable_scope(name) as vs:
embeddings = tf.get_variable(name='embeddings',
shape=(vocabulary_size, embedding_size),
initializer=E_init,
**E_init_args)
embed = tf.nn.embedding_lookup(embeddings, self.inputs)
self.outputs = embed
self.all_layers = [self.outputs]
self.all_params = [embeddings]
self.all_drop = {}
## Dense layer
class DenseLayer(Layer):
"""
The :class:`DenseLayer` class is a fully connected layer.
Parameters
----------
layer : a :class:`Layer` instance
The `Layer` class feeding into this layer.
n_units : int
The number of units of the layer.
act : activation function
The function that is applied to the layer activations.
W_init : weights initializer
The initializer for initializing the weight matrix.
b_init : biases initializer or None
The initializer for initializing the bias vector. If None, skip biases.
W_init_args : dictionary
The arguments for the weights tf.get_variable.
b_init_args : dictionary
The arguments for the biases tf.get_variable.
name : a string or None
An optional name to attach to this layer.
Examples
--------
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.DenseLayer(
... network,
... n_units=800,
... act = tf.nn.relu,
... W_init=tf.truncated_normal_initializer(stddev=0.1),
... name ='relu_layer'
... )
- Without TensorLayer, you can implement it as follows:
>>> W = tf.Variable(
... tf.random_uniform([n_in, n_units], -1.0, 1.0), name='W')
>>> b = tf.Variable(tf.zeros(shape=[n_units]), name='b')
>>> y = tf.nn.relu(tf.matmul(inputs, W) + b)
Notes
-----
If the input to this layer has more than two axes, you need to flatten the
input by using :class:`FlattenLayer` first.
"""
def __init__(
self,
layer = None,
n_units = 100,
act = tf.identity,
W_init = tf.truncated_normal_initializer(stddev=0.1),
b_init = tf.constant_initializer(value=0.0),
W_init_args = {},
b_init_args = {},
name ='dense_layer',
):
Layer.__init__(self, name=name)
self.inputs = layer.outputs
if self.inputs.get_shape().ndims != 2:
raise Exception("The input dimension must be rank 2, please reshape or flatten it")
n_in = int(self.inputs._shape[-1])
self.n_units = n_units
print(" tensorlayer:Instantiate DenseLayer %s: %d, %s" % (self.name, self.n_units, act.__name__))
with tf.variable_scope(name) as vs:
W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args )
if b_init:
b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args )
self.outputs = act(tf.matmul(self.inputs, W) + b)
else:
self.outputs = act(tf.matmul(self.inputs, W))
# Hint : list(), dict() is pass by value (shallow), without them, it is
# pass by reference.
self.all_layers = list(layer.all_layers)
self.all_params = list(layer.all_params)
self.all_drop = dict(layer.all_drop)
self.all_layers.extend( [self.outputs] )
if b_init:
self.all_params.extend( [W, b] )
else:
self.all_params.extend( [W] )
class ReconLayer(DenseLayer):
"""
The :class:`ReconLayer` class is a reconstruction layer based on `DenseLayer`,
which is used to pre-train a `DenseLayer`.
Parameters
----------
layer : a :class:`Layer` instance
The `Layer` class feeding into this layer.
x_recon : tensorflow variable
The variables used for reconstruction.
name : a string or None
An optional name to attach to this layer.
n_units : int
The number of units of the layer; it should be equal to the dimension of x_recon.
act : activation function
The activation function that is applied to the reconstruction layer.
Normally, for sigmoid layer, the reconstruction activation is sigmoid;
for rectifying layer, the reconstruction activation is softplus.
Examples
--------
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.DenseLayer(network, n_units=196,
... act=tf.nn.sigmoid, name='sigmoid1')
>>> recon_layer1 = tl.layers.ReconLayer(network, x_recon=x, n_units=784,
... act=tf.nn.sigmoid, name='recon_layer1')
>>> recon_layer1.pretrain(sess, x=x, X_train=X_train, X_val=X_val,
... denoise_name=None, n_epoch=1200, batch_size=128,
... print_freq=10, save=True, save_name='w1pre_')
Methods
-------
pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10, save=True, save_name='w1pre_')
Start to pre-train the parameters of previous DenseLayer.
Notes
-----
The input layer should be a `DenseLayer` or a layer whose output has only one axis.
You may need to modify this part to define your own cost function.
By default, the cost is implemented as follows:
- For sigmoid layer, the implementation can be `UFLDL <http://deeplearning.stanford.edu/wiki/index.php/UFLDL_Tutorial>`_
- For rectifying layer, the implementation can be `Glorot (2011). Deep Sparse Rectifier Neural Networks <http://doi.org/10.1.1.208.6449>`_
"""
def __init__(
self,
layer = None,
x_recon = None,
name = 'recon_layer',
n_units = 784,
act = tf.nn.softplus,
):
DenseLayer.__init__(self, layer=layer, n_units=n_units, act=act, name=name)
print(" tensorlayer: %s is a ReconLayer" % self.name)
# y : reconstruction outputs; train_params : parameters to train
# Note that: train_params = [W_encoder, b_encoder, W_decoder, b_decoder]
y = self.outputs
self.train_params = self.all_params[-4:]
# =====================================================================
#
# You need to modify the below cost function and optimizer so as to
# implement your own pre-train method.
#
# =====================================================================
lambda_l2_w = 0.004
learning_rate = 0.0001
print(" lambda_l2_w: %f" % lambda_l2_w)
print(" learning_rate: %f" % learning_rate)
# Mean-squared-error, i.e. quadratic cost
mse = tf.reduce_sum(tf.squared_difference(y, x_recon), reduction_indices = 1)
mse = tf.reduce_mean(mse) # in theano: mse = ((y - x) ** 2 ).sum(axis=1).mean()
# mse = tf.reduce_mean(tf.reduce_sum(tf.square(tf.sub(y, x_recon)), reduction_indices = 1))
# mse = tf.reduce_mean(tf.squared_difference(y, x_recon)) # <haodong>: Error
# mse = tf.sqrt(tf.reduce_mean(tf.square(y - x_recon))) # <haodong>: Error
# Cross-entropy
# ce = cost.cross_entropy(y, x_recon) # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , list , Error (only be used for softmax output)
# ce = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(y, x_recon)) # <haodong>: list , index , Error (only be used for softmax output)
L2_w = tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[0]) \
+ tf.contrib.layers.l2_regularizer(lambda_l2_w)(self.train_params[2]) # faster than the code below
# L2_w = lambda_l2_w * tf.reduce_mean(tf.square(self.train_params[0])) + lambda_l2_w * tf.reduce_mean( tf.square(self.train_params[2]))
# DropNeuro
P_o = cost.lo_regularizer(0.03)(self.train_params[0]) # + cost.lo_regularizer(0.5)(self.train_params[2]) # <haodong>: if add lo on decoder, no neuron will be broken
P_i = cost.li_regularizer(0.03)(self.train_params[0]) # + cost.li_regularizer(0.001)(self.train_params[2])
# L1 of activation outputs
activation_out = self.all_layers[-2]
L1_a = 0.001 * tf.reduce_mean(activation_out) # <haodong>: theano: T.mean( self.a[i] ) # some neuron are broken, white and black
# L1_a = 0.001 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=0) ) # <haodong>: some neuron are broken, white and black
# L1_a = 0.001 * 100 * tf.reduce_mean( tf.reduce_sum(activation_out, reduction_indices=1) ) # <haodong>: some neuron are broken, white and black
# KL Divergence
beta = 4
rho = 0.15
p_hat = tf.reduce_mean(activation_out, reduction_indices = 0) # theano: p_hat = T.mean( self.a[i], axis=0 )
KLD = beta * tf.reduce_sum( rho * tf.log(tf.div(rho, p_hat)) + (1- rho) * tf.log((1- rho)/ (tf.sub(float(1), p_hat))) )
# KLD = beta * tf.reduce_sum( rho * tf.log(rho/ p_hat) + (1- rho) * tf.log((1- rho)/(1- p_hat)) )
# theano: L1_a = l1_a[i] * T.sum( rho[i] * T.log(rho[i]/ p_hat) + (1- rho[i]) * T.log((1- rho[i])/(1- p_hat)) )
# Total cost
if act == tf.nn.softplus:
print(' use: mse, L2_w, L1_a')
self.cost = mse + L1_a + L2_w
elif act == tf.nn.sigmoid:
# ----------------------------------------------------
# Cross-entropy was used in Denoising AE
# print(' use: ce, L2_w, KLD')
# self.cost = ce + L2_w + KLD
# ----------------------------------------------------
# Mean-squared-error was used in Vanilla AE
print(' use: mse, L2_w, KLD')
self.cost = mse + L2_w + KLD
# ----------------------------------------------------
# Add DropNeuro penalty (P_o) can remove neurons of AE
# print(' use: mse, L2_w, KLD, P_o')
# self.cost = mse + L2_w + KLD + P_o
# ----------------------------------------------------
# Add DropNeuro penalty (P_i) can remove neurons of previous layer
# If previous layer is InputLayer, it means remove useless features
# print(' use: mse, L2_w, KLD, P_i')
# self.cost = mse + L2_w + KLD + P_i
else:
raise Exception("Don't support the given reconstruct activation function")
self.train_op = tf.train.AdamOptimizer(learning_rate, beta1=0.9, beta2=0.999,
epsilon=1e-08, use_locking=False).minimize(self.cost, var_list=self.train_params)
# self.train_op = tf.train.GradientDescentOptimizer(1.0).minimize(self.cost, var_list=self.train_params)
def pretrain(self, sess, x, X_train, X_val, denoise_name=None, n_epoch=100, batch_size=128, print_freq=10,
save=True, save_name='w1pre_'):
# ====================================================
#
# You need to modify the cost function in __init__() so as to
# get your own pre-train method.
#
# ====================================================
print(" tensorlayer: %s start pretrain" % self.name)
print(" batch_size: %d" % batch_size)
if denoise_name:
print(" denoising layer keep: %f" % self.all_drop[set_keep[denoise_name]])
dp_denoise = self.all_drop[set_keep[denoise_name]]
else:
print(" no denoising layer")
for epoch in range(n_epoch):
start_time = time.time()
for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True):
dp_dict = utils.dict_to_one( self.all_drop )
if denoise_name:
dp_dict[set_keep[denoise_name]] = dp_denoise
feed_dict = {x: X_train_a}
feed_dict.update(dp_dict)
sess.run(self.train_op, feed_dict=feed_dict)
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch %d of %d took %fs" % (epoch + 1, n_epoch, time.time() - start_time))
train_loss, n_batch = 0, 0
for X_train_a, _ in iterate.minibatches(X_train, X_train, batch_size, shuffle=True):
dp_dict = utils.dict_to_one( self.all_drop )
feed_dict = {x: X_train_a}
feed_dict.update(dp_dict)
err = sess.run(self.cost, feed_dict=feed_dict)
train_loss += err
n_batch += 1
print(" train loss: %f" % (train_loss/ n_batch))
val_loss, n_batch = 0, 0
for X_val_a, _ in iterate.minibatches(X_val, X_val, batch_size, shuffle=True):
dp_dict = utils.dict_to_one( self.all_drop )
feed_dict = {x: X_val_a}
feed_dict.update(dp_dict)
err = sess.run(self.cost, feed_dict=feed_dict)
val_loss += err
n_batch += 1
print(" val loss: %f" % (val_loss/ n_batch))
if save:
try:
visualize.W(self.train_params[0].eval(), second=10, saveable=True, shape=[28,28], name=save_name+str(epoch+1), fig_idx=2012)
files.save_npz([self.all_params[0]] , name=save_name+str(epoch+1)+'.npz')
except:
raise Exception("You should change the visualize.W() in ReconLayer.pretrain(), if you want to save the feature images for different dataset")
## Noise layer
class DropoutLayer(Layer):
"""
The :class:`DropoutLayer` class is a noise layer which randomly sets some
values to zero with a given keeping probability.
Parameters
----------
layer : a :class:`Layer` instance
The `Layer` class feeding into this layer.
keep : float
The keeping probability; the lower it is, the more values will be set to zero.
name : a string or None
An optional name to attach to this layer.
Examples
--------
- Define network
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.DropoutLayer(network, keep=0.8, name='drop1')
>>> network = tl.layers.DenseLayer(network, n_units=800, act = tf.nn.relu, name='relu1')
>>> ...
- For training
>>> feed_dict = {x: X_train_a, y_: y_train_a}
>>> feed_dict.update( network.all_drop ) # enable noise layers
>>> sess.run(train_op, feed_dict=feed_dict)
>>> ...
- For testing
>>> dp_dict = tl.utils.dict_to_one( network.all_drop ) # disable noise layers
>>> feed_dict = {x: X_val_a, y_: y_val_a}
>>> feed_dict.update(dp_dict)
>>> err, ac = sess.run([cost, acc], feed_dict=feed_dict)
>>> ...
"""
def __init__(
self,
layer = None,
keep = 0.5,
name = 'dropout_layer',
):
Layer.__init__(self, name=name)
self.inputs = layer.outputs
print(" tensorlayer:Instantiate DropoutLayer %s: keep: %f" % (self.name, keep))
# The name of placeholder for keep_prob is the same with the name
# of the Layer.
set_keep[name] = tf.placeholder(tf.float32)
self.outputs = tf.nn.dropout(self.inputs, set_keep[name], name=name) # 1.2
self.all_layers = list(layer.all_layers)
self.all_params = list(layer.all_params)
self.all_drop = dict(layer.all_drop)
self.all_drop.update( {set_keep[name]: keep} )
self.all_layers.extend( [self.outputs] )
# print(set_keep[name])
# Tensor("Placeholder_2:0", dtype=float32)
# print(denoising1)
# Tensor("Placeholder_2:0", dtype=float32)
# print(self.all_drop[denoising1])
# 0.8
#
# https://www.tensorflow.org/versions/r0.8/tutorials/mnist/tf/index.html
# The optional feed_dict argument allows the caller to override the
# value of tensors in the graph. Each key in feed_dict can be one of
# the following types:
# If the key is a Tensor, the value may be a Python scalar, string,
# list, or numpy ndarray that can be converted to the same dtype as that
# tensor. Additionally, if the key is a placeholder, the shape of the
# value will be checked for compatibility with the placeholder.
# If the key is a SparseTensor, the value should be a SparseTensorValue.
class DropconnectDenseLayer(Layer):
"""
The :class:`DropconnectDenseLayer` class is a ``DenseLayer`` with DropConnect
behaviour, which randomly removes connections between this layer and the previous
layer with a given keeping probability.
Parameters
----------
layer : a :class:`Layer` instance
The `Layer` class feeding into this layer.
keep : float
The keeping probability; the lower it is, the more values will be set to zero.
n_units : int
The number of units of the layer.
act : activation function
The function that is applied to the layer activations.
W_init : weights initializer
The initializer for initializing the weight matrix.
b_init : biases initializer
The initializer for initializing the bias vector.
W_init_args : dictionary
The arguments for the weights tf.get_variable().
b_init_args : dictionary
The arguments for the biases tf.get_variable().
name : a string or None
An optional name to attach to this layer.
Examples
--------
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.8,
... n_units=800, act = tf.nn.relu, name='dropconnect_relu1')
>>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5,
... n_units=800, act = tf.nn.relu, name='dropconnect_relu2')
>>> network = tl.layers.DropconnectDenseLayer(network, keep = 0.5,
... n_units=10, act = tl.activation.identity, name='output_layer')
References
----------
- `Wan, L. (2013). Regularization of neural networks using dropconnect <http://machinelearning.wustl.edu/mlpapers/papers/icml2013_wan13>`_
"""
def __init__(
self,
layer = None,
keep = 0.5,
n_units = 100,
act = tf.identity,
W_init = tf.truncated_normal_initializer(stddev=0.1),
b_init = tf.constant_initializer(value=0.0),
W_init_args = {},
b_init_args = {},
name ='dropconnect_layer',
):
Layer.__init__(self, name=name)
self.inputs = layer.outputs
if self.inputs.get_shape().ndims != 2:
raise Exception("The input dimension must be rank 2")
n_in = int(self.inputs._shape[-1])
self.n_units = n_units
print(" tensorlayer:Instantiate DropconnectDenseLayer %s: %d, %s" % (self.name, self.n_units, act.__name__))
with tf.variable_scope(name) as vs:
W = tf.get_variable(name='W', shape=(n_in, n_units), initializer=W_init, **W_init_args )
b = tf.get_variable(name='b', shape=(n_units), initializer=b_init, **b_init_args )
# self.outputs = act(tf.matmul(self.inputs, W) + b) # redundant: outputs are recomputed below with the dropconnected weights
set_keep[name] = tf.placeholder(tf.float32)
W_dropcon = tf.nn.dropout(W, set_keep[name])
self.outputs = act(tf.matmul(self.inputs, W_dropcon) + b)
self.all_layers = list(layer.all_layers)
self.all_params = list(layer.all_params)
self.all_drop = dict(layer.all_drop)
self.all_drop.update( {set_keep[name]: keep} )
self.all_layers.extend( [self.outputs] )
self.all_params.extend( [W, b] )
## Convolutional layer (Pro)
class Conv1dLayer(Layer):
"""
The :class:`Conv1dLayer` class is a 1D CNN layer, see `tf.nn.conv1d <https://www.tensorflow.org/versions/master/api_docs/python/nn.html#conv1d>`_.
Parameters
----------
layer : a :class:`Layer` instance
The `Layer` class feeding into this layer, [batch, in_width, in_channels].
act : activation function, None for identity.
shape : list of shape
shape of the filters, [filter_length, in_channels, out_channels].
strides : a list of ints.
The stride of the sliding window for each dimension of input.\n
It must be in the same order as the dimension specified with data_format.
padding : a string from: "SAME", "VALID".
The type of padding algorithm to use.
use_cudnn_on_gpu : An optional bool. Defaults to True.
data_format : An optional string from "NHWC", "NCHW". Defaults to "NHWC", the data is stored in the order of [batch, in_width, in_channels]. The "NCHW" format stores data as [batch, in_channels, in_width].
W_init : weights initializer
The initializer for initializing the weight matrix.
b_init : biases initializer or None
The initializer for initializing the bias vector. If None, skip biases.
W_init_args : dictionary
The arguments for the weights tf.get_variable().
b_init_args : dictionary
The arguments for the biases tf.get_variable().
name : a string or None
An optional name to attach to this layer.
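Examples
--------
- A minimal sketch (the input shape [batch, in_width, in_channels] and the filter
sizes are illustrative):
>>> x = tf.placeholder(tf.float32, shape=[None, 100, 1])
>>> network = tl.layers.InputLayer(x, name='input_layer')
>>> network = tl.layers.Conv1dLayer(network,
...                     act = tf.nn.relu,
...                     shape = [5, 1, 32],    # 32 filters of length 5 over 1 input channel
...                     strides = [1, 1, 1],
...                     padding = 'SAME',
...                     name = 'cnn_layer1')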
"""
def __init__(
self,
layer = None,
act = tf.identity,
shape = [5, 5, 1],
strides=[1, 1, 1],
padding='SAME',
use_cudnn_on_gpu=None,
data_format=None,
W_init = tf.truncated_normal_initializer(stddev=0.02),
b_init = tf.constant_initializer(value=0.0),
W_init_args = {},
b_init_args = {},
name ='cnn_layer',
):
Layer.__init__(self, name=name)
self.inputs = layer.outputs
print(" tensorlayer:Instantiate Conv1dLayer %s: %s, %s, %s, %s" %
(self.name, str(shape), str(strides), padding, act.__name__))