In [1]:
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
In [82]:
class Optimizer():
    """Population-based optimizer over the graph's global variables (TF1).

    A "poblation" (population) is a tf.data.Dataset holding candidate values
    for every global variable; candidates are scored by evaluating the
    objective tensor registered via setFunction().

    NOTE(review): relies on TF 1.x APIs (tf.Session, global_variables,
    make_initializable_iterator) — it will not run under TF 2.x eager mode.
    """

    def __init__(self):
        # Dedicated session; variables must already exist in the default graph.
        self.sess = tf.Session()
        init_op = tf.global_variables_initializer()
        self.sess.run(init_op)
        self.variables = tf.global_variables()
        self.generateInitialPoblation()

    def printData(self):
        """Print every element of the current population dataset."""
        iterator = self.data.make_initializable_iterator()
        self.sess.run(iterator.initializer)
        next_element = iterator.get_next()
        while True:
            try:
                print(self.sess.run(next_element))
            except tf.errors.OutOfRangeError:
                break

    def setFunction(self, model):
        """Register the objective tensor whose value scores each candidate."""
        self.model = model

    def evaluationSurvival(self, function=None, poblation=None):
        """Attach a "survive" score to every candidate in the population.

        Bug fix: the original overwrote both arguments unconditionally; they
        now default to self.data / self.evaluationTensor only when omitted,
        which is backward-compatible with all existing call sites.
        """
        if poblation is None:
            poblation = self.data
        if function is None:
            function = self.evaluationTensor

        def temporal(*x):
            return {"people": x, "survive": function(x)}

        self.data = poblation.map(temporal)
        return self.data

    def evaluationTensor(self, x):
        """Score one candidate by evaluating the objective tensor.

        NOTE(review): the per-variable assign was commented out in the
        original, so the objective is still evaluated at the variables'
        current values; presumably the assign should be re-enabled — confirm.
        """
        for i, value in enumerate(x):
            # TODO confirm intent: load candidate values before evaluating.
            # self.sess.run(self.variables[i].assign(value))
            pass
        return self.sess.run(self.model)

    def generateInitialPoblation(self, cant=30):
        """Build the initial population: `cant` uniform samples per variable."""
        rows = []
        for variable in self.variables:
            col = []
            for i in range(cant):
                col.append(self.sess.run(tf.random.uniform(variable.shape)))
            rows.append(col)
        self.data = tf.data.Dataset.from_tensor_slices(tuple(rows))

    def interation(self, dataset=None):
        """One generation: score candidates, report the best, evolve.

        Bug fixes: the original referenced a module-level `sess` (NameError
        when no global session exists) instead of self.sess, and it ran
        next_element twice per loop step, silently skipping every other
        candidate.
        """
        if dataset is None:
            dataset = self.data
        dataset = self.evaluationSurvival(poblation=dataset)
        iterator = dataset.make_initializable_iterator()
        self.sess.run(iterator.initializer)
        next_element = iterator.get_next()
        best = None
        while True:
            try:
                candidate = self.sess.run(next_element)
                # Plain Python comparison on the fetched numpy values; no new
                # graph ops are created inside the loop.
                if best is None or best["survive"] < candidate["survive"]:
                    best = candidate
                    print(best)
            except tf.errors.OutOfRangeError:
                break
        self.data = self.fixFunction()

    def fixFunction(self, poblation=None):
        """Produce the next population from the current one.

        Bug fix: honors the `poblation` argument when supplied instead of
        silently replacing it with self.data.
        """
        if poblation is None:
            poblation = self.data

        def temporal(*x):
            x = list(x)
            # Placeholder evolution rule: scale each candidate by 1.5.
            newpeople = tuple(np.array(x[0]["people"]) + np.array(x[0]["people"]) / 2)
            return {"people": newpeople, "survive": -1 * self.evaluationTensor(newpeople)}

        self.data = poblation.map(temporal)
        return self.data
In [51]:
# Toy data: 100 (feature, label) pairs drawn uniformly from [0, 1).
features, labels = (np.random.sample((100,1)), np.random.sample((100,1)))
dataset = tf.data.Dataset.from_tensor_slices((features,labels))
# TF1-style session used by the scan loop below.
sess = tf.Session()
def functionDummy(*x):
    """Dummy fitness: negative sum of squares of the candidate's components.

    Bug fix: the original assigned `total = pow(i, 2)` inside the loop,
    discarding every component but the last; it now accumulates (matching
    the sum-of-squares checks done by hand later in the notebook).
    The debug print of the raw input was removed.

    Returns a dict with the raw candidate under "people" and its fitness
    under "survive" (higher is better, so the sum of squares is negated).
    """
    total = 0
    for component in x:
        total += pow(component, 2)
    return {"people": x, "survive": -total}
# Score every (feature, label) pair with the dummy fitness function.
dataset = dataset.map(functionDummy)
#maxi = tf.math.argmax(dataset)
#print(maxi)
iterator = dataset.make_one_shot_iterator()
next_element = iterator.get_next()
item = next_element
# Linear scan keeping the element with the largest "survive" value.
# NOTE(review): sess.run(next_element) is executed both inside tf.less and
# again in the body, so the iterator advances two elements per accepted
# step and candidates are skipped; also a new tf.less op is created on
# every pass — confirm whether this is intentional.
while True:
    try:
        compare = tf.less(item["survive"], sess.run(next_element)["survive"])
        if sess.run(compare):
            item = sess.run(next_element)
    except tf.errors.OutOfRangeError:
        break
def fixFunction(*x):
    """Evolve one mapped element: scale its "people" value by 1.5 and rescore.

    Bug fixes: removed a stray bare `p` statement (NameError at call time)
    and indexed the *args tuple before the "people" key lookup — Dataset.map
    passes the element dict as a single positional argument.

    NOTE(review): functionDummy returns a dict, so "survive" here holds a
    dict rather than a number — presumably ["survive"] was intended; confirm
    before enabling the map below.
    """
    element = x[0]
    newpeople = element["people"] + element["people"] / 2
    return {"people": newpeople, "survive": functionDummy(newpeople)}
#dataset = dataset.map(fixFunction)
In [76]:
dicc = dict()
In [77]:
dicc["amarillo"] = 1
In [79]:
# Assigning to an existing key replaces its value — prints {'amarillo': 3}.
dicc["amarillo"] = 3
print(dicc)
In [88]:
- pow(0.1205712,2) + pow(0.04627164,2)
Out[88]:
In [91]:
pow(0.53631437,2) + pow(0.14657798,2)
Out[91]:
In [99]:
functionDummy(0.6784184, 0.14471732)
Out[99]:
In [4]:
# Rosenbrock objective: f(x, y) = 100*(x^2 - y)^2 + (1 - x)^2.
# Bug fix: tf.Variable's second positional parameter is `trainable`, so the
# original passed np.float32 as trainable (merely truthy); pass dtype by
# keyword instead.
x = tf.Variable(-0.5, dtype=tf.float32)
y = tf.Variable(+3.5, dtype=tf.float32)
f = tf.add(tf.multiply(tf.constant(100.),
                       tf.pow(tf.subtract(tf.pow(x, 2), y), 2)),
           tf.pow(tf.subtract(tf.constant(1.0), x), 2))
#with tf.name_scope("inner"):
#    c_3 = tf.Variable(+3.0, np.float32, name="c")
#with tf.name_scope("inner"):
# c_3 = tf.Variable(+3.0, np.float32,name="c")
In [7]:
type(f)
Out[7]:
In [42]:
# Take one random individual, load it into the graph variables, evaluate f.
# Fixes: use tf.global_variables_initializer() — tf.initialize_all_variables
# is the deprecated name and the rest of the file already uses the modern
# op — and drop the duplicated tf.Session() construction.
sess = tf.Session()
init_op = tf.global_variables_initializer()

# Build a dataset of 30 uniform random samples per global variable.
rows = []
for variable in tf.global_variables():
    col = []
    for i in range(30):
        col.append(tf.random.uniform(variable.shape))
    rows.append(col)
data = tf.data.Dataset.from_tensor_slices(tuple(rows))

iterator = data.make_initializable_iterator()
sess.run(iterator.initializer)
next_element = iterator.get_next()
sess.run(init_op)

# Fetch one element (one value per variable) and assign it variable-by-variable.
item = sess.run(next_element)
print(item)
variables = tf.global_variables()
for i, value in enumerate(item):
    print(value)
    sess.run(variables[i].assign(value))
print(sess.run(f))
sess.run(f)
# Later, when launching the model
In [62]:
# Rebuild a random population (30 samples per global variable) and print it.
# NOTE(review): this duplicates Optimizer.generateInitialPoblation plus
# Optimizer.printData — consider calling those instead.
sess = tf.Session()
rows = []
for variable in tf.global_variables():
    col = []
    for i in range(30):
        col.append( tf.random.uniform(variable.shape))
    rows.append(col)
data = tf.data.Dataset.from_tensor_slices(tuple(rows))
iterator = data.make_initializable_iterator()
sess.run(iterator.initializer)
next_element = iterator.get_next()
item = next_element
# Drain the dataset, printing each element until exhaustion.
while True:
    try:
        item =sess.run(next_element)
        print(item)
    except tf.errors.OutOfRangeError:
        break
In [83]:
# Driver: build the optimizer, attach the objective f, run one iteration,
# and print the population before and after.
optimizer = Optimizer()
print(optimizer.variables)
optimizer.setFunction(f)
print(optimizer.model)
optimizer.printData()
print("ejecucion")
optimizer.interation()
optimizer.printData()
In [18]:
type(sess.run(tf.random.uniform([1,2])))
Out[18]:
In [74]:
np.array((1, 2)) / 2
Out[74]:
In [2]:
# Simple quadratic objective f(x, y) = (x + y)^2.
# Bug fix: tf.Variable's second positional parameter is `trainable`; pass the
# dtype by keyword. NOTE(review): this rebinds x, y, f, shadowing the
# Rosenbrock objective defined earlier in the notebook — confirm intended.
x = tf.Variable(-20, dtype=np.float32)
y = tf.Variable(-30, dtype=np.float32)
f = tf.pow(tf.add(x,y), 2)
In [25]:
# One simulated-annealing style step on f at temperature T.
T = 1000
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op);
    #T = tf.Variable(T0,trainable=False)
    # Snapshot the objective and variable values so the move can be undone.
    f_old = f.eval()
    x_old = x.eval()
    y_old = y.eval()
    #w= tf.assign(f,2)
    # Proposed move: decrement x by 1, leave y unchanged.
    x_update = x.assign(x.eval() - 1)
    y_update = y.assign(y.eval() )
    #x_reupdate = x.assign(x.eval() - 1)
    # run the session
    with tf.control_dependencies([x_update,y_update]):
        #print(sess.run([ y_update, x_update]))
        x_update_err = x_update
        y_update_err = y_update
        # NOTE(review): control_dependencies only affects ops *created* inside
        # this block; f.eval() does not force the assigns above, so delta may
        # be computed from the pre-move variable values — verify.
        delta = f.eval() - f_old
        if delta > 0 :
            # NOTE(review): unconditionally accepting delta > 0 is the reverse
            # of standard minimizing annealing — confirm whether f is being
            # maximized here.
            print("delta")
            next  # bare name: a no-op in Python; presumably meant "accept move"
        elif pow(np.e, -delta/ T) >= np.random.uniform():
            print("probablidad" + str(pow(np.e, -delta/ T)))
            next  # no-op; accept the worse move with annealing probability
        else:
            # Reject: schedule assigns restoring the snapshot values.
            x_update_err = x.assign(x_old)
            y_update_err = y.assign(y_old)
    #print(type(x_update_err))
    # Execute whichever assigns were selected, then report the new state.
    sess.run([y_update_err, x_update_err])
    print(x.eval(), y.eval())
In [4]:
class OptimizadorN():#tf.Operation):
    """Simulated-annealing style optimizer that returns graph assign nodes.

    Each call to eval() proposes a random perturbation of every global
    variable and returns either the perturbed assigns (move accepted) or
    assigns restoring the saved values (move rejected).

    NOTE(review): requires a TF 1.x session to be active (uses Tensor.eval()).
    """

    def set_objetive(self, obj):
        """Register the objective tensor to anneal."""
        self.obj= obj

    def __init__(self, T = 100):
        # Snapshot of the graph's variables; T is the annealing temperature.
        self.variables = tf.global_variables()
        self.T = T
        #self._graph = tf.get_default_graph()
        #self._c_op = None

    def method(self):
        """Build one annealing step and store the nodes to run in self.operaciones."""
        f_old = self.obj.eval()
        var = []
        dependencies = []
        for variable in self.variables:
            # `var` restores current values; `dependencies` applies a random move.
            var.append(variable.assign(variable.eval()))
            dependencies.append(variable.assign(variable.eval() - np.random.uniform()))
        with tf.control_dependencies(dependencies):
            #print(sess.run([ y_update, x_update]))
            nodes = dependencies
            # Bug fix: the original compared against the module-level `f`
            # instead of the configured objective; now uses self.obj.
            delta = self.obj.eval() - f_old
            if delta > 0 :
                pass  # improvement: keep the perturbed assignments
            elif pow(np.e, -delta/ self.T) >= np.random.uniform():
                #print("probablidad" + str(pow(np.e, -delta/ self.T)))
                pass  # accept the worse move with annealing probability
            else:
                nodes = var  # rejected: revert to the saved values
        self.operaciones = nodes

    def eval(self):
        """Run one step and return the assign nodes the caller must sess.run()."""
        self.method()
        return(self.operaciones)
# main block
opt =OptimizadorN()
opt.set_objetive(f)
with tf.Session() as sess:
    init_op = tf.global_variables_initializer()
    sess.run(init_op);
    # eval() only returns the graph nodes that must be executed; the caller
    # chains them by running the assigns that leave the variables at their
    # final values.
    print(sess.run(opt.eval()))
    # Note: opt itself is not a tensor, so passing it straight to sess.run
    # is expected to fail.
    print(sess.run(opt))
In [5]:
[1,2,3,4,5,10][0]
Out[5]:
In [6]:
np.random.uniform()
Out[6]:
In [ ]: