The exercises from Refresher_numpy are repeated here in their TensorFlow (1.x graph-mode) versions.
import tensorflow as tf
import numpy as np
# TF 1.x graph mode: ops only build the graph; sess.run() actually executes them.
sess = tf.Session()
# Scalar constant tensor.
const = tf.constant(10)
sess.run(const)
# Equivalent of np.arange: values in [0, 10) with step 1.
arange = tf.range(0,10,1)
print(arange)  # prints the symbolic Tensor, not its value
print(arange.op)  # the graph Operation that produces this tensor
sess.run(arange)
linspace = tf.linspace(0.,10.,10) # start/stop must be floats: tf.linspace requires a float dtype
print(linspace.op)
sess.run(linspace)
zeros = tf.zeros((3,5)) # or tf.ones for a ones-filled tensor
print(zeros.op)
sess.run(zeros)
# (3,5) tensor filled with the value 4.
fill = tf.fill((3,5),4)
print(fill.op)
sess.run(fill)
# Same result via tf.constant with an explicit shape.
fill_const = tf.constant(4,shape=(3,5))
print(fill_const.op)
sess.run(fill_const)
# Samples from a normal distribution with mean 1 and stddev 0.01.
rnd_normal = tf.random_normal((3,5),1,0.01)
print(rnd_normal.op)
sess.run(rnd_normal)
Using random seed
# Graph-level seed; together with the per-op seed below it makes the
# random op reproducible across runs.
tf.set_random_seed(99)
rnd_int = tf.random_uniform((3,5),0,10,dtype=tf.int32,seed=101)  # integers in [0, 10)
sess.run(rnd_int)
Reshaping works the same as in NumPy.
const = tf.range(0,15,1)  # 15 elements: 0..14
sess.run(const)
# Reshape to a 3x5 matrix (3*5 == 15 elements).
reshape = tf.reshape(const,(3,5))
print(reshape)
sess.run(reshape)
reshape = tf.reshape(reshape,(-1,)) # flatten back to 1-D; don't miss the comma — the shape must be a tuple
print(reshape)
sess.run(reshape)
# -1 lets TensorFlow infer the remaining dimension (here 3).
reshape = tf.reshape(reshape,(5,-1))
print(reshape)
sess.run(reshape)
Reshape leaving the batch axis
ones = tf.ones((5,5,5))
print(ones)
# Keep 25 features per row; -1 infers the leading (batch) axis.
reshape = tf.reshape(ones,(-1,25))
print(reshape)
# Reductions with no axis argument reduce over the whole tensor to a scalar.
max_v = tf.reduce_max(const)
sess.run(max_v)
min_v = tf.reduce_min(const)
sess.run(min_v)
mean_v = tf.reduce_mean(const)
sess.run(mean_v)
# Index location of the max value.
argmax_v = tf.argmax(const)
sess.run(argmax_v)
Slicing, including the syntax, works the same as in NumPy.
To grab specific columns or rows, use tf.nn.embedding_lookup
# 10x10 matrix containing 0..99, reused by the filtering examples below.
mat = tf.reshape(tf.range(0,100,1),(10,10))
sess.run(mat)
sess.run(mat[0,:]) # First row
sess.run(tf.nn.embedding_lookup(mat,[0,2])) # First and third row
sess.run(mat[:,0]) # First column
# embedding_lookup gathers along axis 0 (rows), so transpose first to gather columns.
sess.run(tf.nn.embedding_lookup(tf.transpose(mat),[0,2])) # First and third column
sess.run(mat[:2,:3]) # slice first 2 rows and 3 columns
sess.run(mat[2:4,5:-2]) # slice from 2nd until 4th row, from 5th until last second column
sess.run(mat[:5:2,:5:2]) # slice every 2nd element from first 5 rows and columns
sess.run(mat[::2,::2]) # slice every 2nd element in the whole matrix
sess.run(mat[2::2,3::2]) # every 2nd element starting from row index 2 and column index 3
sess.run(mat[::-1,::-1]) # reverse the entire matrix
sess.run(mat[3::-1,3::-1]) # pick up the first 4 rows and columns and reverse them
To filter a tensor with a boolean condition, tf.boolean_mask should be used.
# Element-wise comparison yields a boolean tensor of the same shape as mat.
my_filter = mat > 50
print(my_filter)
sess.run(my_filter)
sess.run(tf.boolean_mask(mat,mat>50)) # Equivalent to NumPy's mat[mat>50], i.e. mat[my_filter]
Reassignment to the same tensor does not work in TensorFlow — tensors are immutable.
# tf.where(cond, x, y) selects element-wise; it builds a new tensor instead
# of modifying mat in place.
sess.run(tf.where(mat>50,mat*-10,mat)) # prefer tf.less/tf.greater over Python comparison operators
#sess.run(tf.where(mat>50,-10,mat)) # Does not work: both branches must conform to the tensor's shape; use mat*0-10
# Single-argument tf.where returns the coordinates of the True entries.
sess.run(tf.where(tf.equal(mat%2,0)))
#sess.run(tf.where(mat%2 == 0)) # does not work; use tf.equal instead of ==
To get elements corresponding to indices, flatten the tensor and use tf.nn.embedding_lookup
# Flatten, find the indices of the even elements, then gather them.
mat_flat = tf.reshape(mat,(-1,))
idx = tf.where(tf.equal(mat_flat%2,0))  # shape (n_matches, 1): one row per match
elx = tf.nn.embedding_lookup(mat_flat,idx)
sess.run(elx)
This is done using the tf.scatter_* ops. See the Sampling section for examples.
This cannot be done in the numpy way as reassignment to tensors is not allowed.
We can't use the NumPy-style in-place form:
mat[idx] = func(mat[idx])
We can manipulate whole column or rows as follows
# 5x3 matrix of 0..14, used by the normalization examples below.
a = tf.reshape(tf.range(0,15),(5,3))
sess.run(a)
Normalizing all columns
def normalize(a):  # can be any function
    """Min-max normalize a tensor into the [0, 1] range.

    Relies on broadcasting: the scalar min/max are subtracted from /
    divide every element. With TF >= 1.0 the `/` operator performs true
    division, so integer inputs produce a float result.

    NOTE(review): if all elements of `a` are equal the denominator is
    zero and the result is NaN/inf — avoid constant inputs.
    """
    # Hoist the minimum so it is computed once instead of twice.
    lo = tf.reduce_min(a)
    return (a - lo) / (tf.reduce_max(a) - lo)
# Normalize each column independently, then stitch the columns back together.
# tf.concat needs rank >= 2 inputs, hence the expand_dims to shape (rows, 1).
col_list = [tf.expand_dims(normalize(a[:, j]), axis=1) for j in range(a.shape[1])]
c = tf.concat(col_list, axis=1)
sess.run(c)
Normalizing all rows
# Same idea row-wise: normalize each row, expand it to shape (1, cols),
# then stack the rows back up along axis 0.
row_list = [tf.expand_dims(normalize(a[r, :]), axis=0) for r in range(a.shape[0])]
c = tf.concat(row_list, axis=0)
sess.run(c)
Broadcasting works the same as in NumPy.
arr = tf.ones((5,4))
sess.run(arr)
sess.run(arr + 1) # (5,4) + (1,): scalar broadcast over every element
sess.run(arr + [1,2,3,4]) # (5,4) + (4,): broadcast down the rows
sess.run(arr + tf.reshape(tf.constant([1.,2.,3.,4.,5.]),(5,1))) # (5,4) + (5,1): broadcast across the columns
# 10 samples with 3 integer features each, plus one binary label per sample.
features = tf.random_uniform((10,3),0,101,dtype=tf.int32)
labels = tf.random_uniform((10,1),0,2,dtype=tf.int32)
print(features)
print(labels)
sess.run(tf.concat([features,labels],axis=1)) # same syntax as numpy's concatenate
# 1000 evenly spaced points in [0, 10].
x = tf.lin_space(0.,10.,1000)
print(x)
sample_size = 10
# Draw random indices into x, then gather them to form the sample.
rand_ind = tf.random_uniform((sample_size,),0,x.shape[0],dtype=tf.int32)
sess.run(rand_ind)
sample_x = tf.nn.embedding_lookup(x,rand_ind)
print(sample_x)
sess.run(sample_x)
Another example
Add a delta to the sampled points
tf.scatter_add illustration
x = tf.range(50,75,dtype=tf.float32)
sess.run(x)
sample_size = 5
rand_ind = tf.random_uniform((sample_size,),0,x.shape[0],dtype=tf.int32)
sess.run(rand_ind)
# tf.scatter_* ops mutate state, so they need a tf.Variable, not a plain tensor.
x_var = tf.Variable(x)
# Update should be of size 1 or of size rand_ind
# NOTE(review): rand_ind may contain duplicate indices, in which case
# scatter_add accumulates the delta more than once at that position.
x_delta = tf.scatter_add(x_var,rand_ind,0.33)
#x_delta = tf.scatter_add(x_var,rand_ind,tf.constant([1.3,2.3,3.3,4.3,5.3]))
# Variables must be initialized before any op that reads them is run.
sess.run(tf.global_variables_initializer())
sess.run(x_delta)
Apply an arbitrary function to the sampled points.
tf.scatter_update illustration
def func(X, ind):
    """Gather the elements of X at ind and scale them to 33% of their original value."""
    return tf.nn.embedding_lookup(X, ind) * 0.33
x_var = tf.Variable(x)
# Note: func reads from the original tensor `x`, not from x_var; the computed
# values are then written into x_var at positions rand_ind.
t = func(x,rand_ind)
x_delta = tf.scatter_update(x_var,rand_ind,func(x,rand_ind))
# Variables must be initialized before use.
sess.run(tf.global_variables_initializer())
sess.run([x_delta,rand_ind,t])
sess.close()