In [1]:
import matplotlib.pyplot as plt
# Python 2/3 compatibility shim for xrange
try:
    xrange = xrange
except NameError:
    xrange = range
from PIL import ImageGrab
import cv2
import time
import math
import datetime
import random
import io
from lib.getkeys import key_check
from lib.reinforcement import Qnetwork,updateTarget,updateTargetGraph
from lib.SQL import SQLCalls
from sys import stdout
import sqlite3
import tensorflow as tf
import tensorflow.contrib.slim as slim
import numpy as np
import os
from PIL import Image
from keras import backend as K
from keras.models import load_model
from keras.utils import plot_model
from keras.models import Model,Sequential
from keras.layers import Input, LSTM, Dense, Dropout, Conv2D, MaxPooling2D,concatenate, Flatten, GlobalAveragePooling2D
from keras.utils import to_categorical
SQL=SQLCalls()


Using TensorFlow backend.

In [6]:
def make_model():
    image_model_inputs = Input(shape=(580,580,3),dtype='float32',name='main_image')
    image_model=Conv2D(16, (2, 2), padding='valid', activation='relu')(image_model_inputs)
    image_model=Conv2D(16, (2, 2), padding='valid', activation='relu')(image_model)

    image_model=Conv2D(32, (1,1),strides=2, padding='valid', activation='relu')(image_model)
    image_model=Conv2D(32, (1,1),strides=2, padding='valid', activation='relu')(image_model)

    image_model=Conv2D(8, (1,1),strides=3, padding='same', activation='relu')(image_model)
    image_model=Conv2D(8, (1,1),strides=2, padding='same', activation='relu')(image_model)
    
    image_model=Flatten()(image_model)
    
    gene_model_inputs = Input(shape=(13,13,12),dtype='float32',name='gene_image')
    gene_model=Conv2D(16, (2, 2), padding='valid', activation='relu')(gene_model_inputs)
    gene_model=Conv2D(16, (2, 2), padding='valid', activation='relu')(gene_model)
    
    # gene_model=Conv2D(8, (1,1),strides=3, padding='same', activation='relu')(gene_model)
    # gene_model=Conv2D(8, (1,1),strides=2, padding='same', activation='relu')(gene_model)
    
    gene_model=Flatten()(gene_model)
    
    combined_model=concatenate([image_model,gene_model])
    
    combined_model=Dense(32, activation='relu')(combined_model)
    combined_model=Dense(8, activation='relu')(combined_model)
    combined_model_predictions=Dense(2, activation='softmax')(combined_model)
    model=Model(inputs=[image_model_inputs,gene_model_inputs],outputs=combined_model_predictions)
    model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
    model.summary()
    return model
model=make_model()


____________________________________________________________________________________________________
Layer (type)                     Output Shape          Param #     Connected to                     
====================================================================================================
main_image (InputLayer)          (None, 580, 580, 3)   0                                            
____________________________________________________________________________________________________
conv2d_9 (Conv2D)                (None, 579, 579, 16)  208         main_image[0][0]                 
____________________________________________________________________________________________________
conv2d_10 (Conv2D)               (None, 578, 578, 16)  1040        conv2d_9[0][0]                   
____________________________________________________________________________________________________
conv2d_11 (Conv2D)               (None, 289, 289, 32)  544         conv2d_10[0][0]                  
____________________________________________________________________________________________________
conv2d_12 (Conv2D)               (None, 145, 145, 32)  1056        conv2d_11[0][0]                  
____________________________________________________________________________________________________
gene_image (InputLayer)          (None, 13, 13, 12)    0                                            
____________________________________________________________________________________________________
conv2d_13 (Conv2D)               (None, 49, 49, 8)     264         conv2d_12[0][0]                  
____________________________________________________________________________________________________
conv2d_15 (Conv2D)               (None, 12, 12, 16)    784         gene_image[0][0]                 
____________________________________________________________________________________________________
conv2d_14 (Conv2D)               (None, 25, 25, 8)     72          conv2d_13[0][0]                  
____________________________________________________________________________________________________
conv2d_16 (Conv2D)               (None, 11, 11, 16)    1040        conv2d_15[0][0]                  
____________________________________________________________________________________________________
flatten_3 (Flatten)              (None, 5000)          0           conv2d_14[0][0]                  
____________________________________________________________________________________________________
flatten_4 (Flatten)              (None, 1936)          0           conv2d_16[0][0]                  
____________________________________________________________________________________________________
concatenate_2 (Concatenate)      (None, 6936)          0           flatten_3[0][0]                  
                                                                   flatten_4[0][0]                  
____________________________________________________________________________________________________
dense_4 (Dense)                  (None, 32)            221984      concatenate_2[0][0]              
____________________________________________________________________________________________________
dense_5 (Dense)                  (None, 8)             264         dense_4[0][0]                    
____________________________________________________________________________________________________
dense_6 (Dense)                  (None, 2)             18          dense_5[0][0]                    
====================================================================================================
Total params: 227,274
Trainable params: 227,274
Non-trainable params: 0
____________________________________________________________________________________________________
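
As a sanity check on the two-branch wiring, the already-imported plot_model can draw the graph to disk. A minimal sketch; the output filename is arbitrary, and pydot plus Graphviz must be installed:

# Optional visual check of the two-input graph; writes a PNG with layer shapes.
# Requires pydot and Graphviz - skip if they are not available.
plot_model(model, to_file='dqn_model.png', show_shapes=True)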

In [25]:
model = load_model('dqn_frozen_model.h5')
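
The frozen network is presumably written out elsewhere with model.save; a minimal sketch of that round trip, assuming the training side uses the same filename:

# Assumed counterpart elsewhere in the project (filename taken from the cell above):
# model.save stores architecture, weights and optimizer state in one HDF5 file,
# which load_model then restores without calling make_model() again.
model.save('dqn_frozen_model.h5')
model = load_model('dqn_frozen_model.h5')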

In [3]:
def setup_genomes():
    BoxRadius=6
    BoxLength=BoxRadius*2+1
    BoxArea=(BoxLength)*(BoxLength)
    gene_image=np.zeros([len(Genomes),BoxLength,BoxLength,12])
    BUTTON_AMOUNT=6
    for Genome_Num,Genome in enumerate(Genomes):
        for gene in Genome:
            genome_type=0
            # print(gene[0],Genome_Num,BoxArea*2)
            if gene[0]<BoxArea:
                # Normal input
                pass
            elif gene[0]>BoxArea*2:
                # Bias input - not encoded in the gene image
                continue
            else:
                # Inverse input - shift into the second block of channels
                genome_type+=BUTTON_AMOUNT
            genome_type+=int(gene[1]-1000001)
            if genome_type>=0:
                # gene_image is indexed [genome, Y, X, type], where type encodes input kind and button pressed
                gene_image[Genome_Num][int(gene[0]%BoxArea//BoxLength)][int(gene[0]%BoxArea%BoxLength)][genome_type]=gene[2]
    return gene_image
Genomes=SQL.GatherGenomes()
gene_images=setup_genomes()
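
To make the indexing above concrete, here is a hedged walkthrough of where a single gene lands in the 13x13x12 image; the gene values are made up purely for illustration:

# Hypothetical gene: (input index, output id, weight) - values are illustrative only.
BoxLength, BoxArea, BUTTON_AMOUNT = 13, 13 * 13, 6
gene = (200, 1000003, 0.7)
row = int(gene[0] % BoxArea // BoxLength)          # 200 % 169 = 31 -> row 2
col = int(gene[0] % BoxArea % BoxLength)           # 31 % 13      -> column 5
channel = BUTTON_AMOUNT + int(gene[1] - 1000001)   # inverse input (169 <= 200 <= 338), button 2 -> channel 8
print(row, col, channel, gene[2])                  # where the weight would be written in gene_image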

In [9]:
def adapt_array(arr):
    """
    http://stackoverflow.com/a/31312102/190597 (SoulNibbler)
    """
    out = io.BytesIO()
    np.save(out, arr)
    out.seek(0)
    return sqlite3.Binary(out.read())
def convert_array(text):
    out = io.BytesIO(text)
    out.seek(0)
    return np.load(out)
# Converts np.ndarray to a BLOB when inserting
sqlite3.register_adapter(np.ndarray, adapt_array)

# Converts columns declared as "array" back to np.ndarray when selecting
sqlite3.register_converter("array", convert_array)

con=sqlite3.connect('DQN.db', detect_types=sqlite3.PARSE_DECLTYPES,isolation_level=None)
cur = con.cursor()
cur.execute("PRAGMA synchronous = OFF;")
cur.execute("PRAGMA journal_mode=WAL;")
cur.execute("PRAGMA read_uncommitted = true;")
cur.execute("SELECT GenomeNum,Gene,GeneContent FROM Genes ORDER BY Genome,Gene")
sql = '''Select image
        from example_images
        WHERE score=0 and GenomeKey like '%90%' LIMIT 1'''
cur.execute(sql)
results=np.array(cur.fetchone())[0]
cur.execute("PRAGMA synchronous = OFF;")
cur.execute("PRAGMA journal_mode=WAL;")
cur.execute("PRAGMA read_uncommitted = true;")
cur.execute("SELECT GenomeNum,Gene,GeneContent FROM Genes ORDER BY Genome,Gene")
sql = '''Select geneimage
        from example_genes where GenomeKey like '%05:43:07.450%' LIMIT 32'''
cur.execute(sql)
results2=np.array(cur.fetchall()).reshape([32,13,13,12])
Image.fromarray(results,'RGB')
Image.fromarray(Images[0],'RGB')


Out[9]:
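
Because the adapter and converter are registered at module level, any connection opened with PARSE_DECLTYPES round-trips numpy arrays through columns declared as "array". A throwaway in-memory sketch; the table and column names here are illustrative, not the project schema:

# Round-trip demo of the adapter/converter pair on a scratch in-memory database.
demo = sqlite3.connect(':memory:', detect_types=sqlite3.PARSE_DECLTYPES)
demo.execute("CREATE TABLE demo_arrays (arr array)")
demo.execute("INSERT INTO demo_arrays VALUES (?)", (np.arange(12).reshape(3, 4),))
restored = demo.execute("SELECT arr FROM demo_arrays").fetchone()[0]
print(restored.shape)   # (3, 4) - the array comes back intact from the BLOB
demo.close()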

In [8]:
image_duplicated=np.tile(Images[0], (32,1,1,1))
print(image_duplicated.shape)
print(results2.shape)
history=model.predict([image_duplicated,results2],batch_size=16)
print(np.sum(history[:,0]))
print(np.argmax((history[:,0])))
print(np.sum(history[:,1]))
print(max(history[:,0]))
print(history)


(32, 580, 580, 3)
(32, 13, 13, 12)
4.4033
26
27.5967
0.508948
[[ 0.19027609  0.80972391]
 [ 0.26692012  0.73307985]
 [ 0.13676591  0.8632341 ]
 [ 0.05729575  0.94270432]
 [ 0.05082692  0.94917315]
 [ 0.18086362  0.81913638]
 [ 0.1114553   0.88854468]
 [ 0.11648451  0.88351542]
 [ 0.16224658  0.83775336]
 [ 0.09685729  0.90314269]
 [ 0.09572402  0.90427595]
 [ 0.17685317  0.82314682]
 [ 0.08388741  0.91611266]
 [ 0.11657899  0.883421  ]
 [ 0.12548617  0.87451386]
 [ 0.08272504  0.91727495]
 [ 0.08477109  0.91522884]
 [ 0.0553071   0.94469285]
 [ 0.11489817  0.88510191]
 [ 0.16331336  0.83668667]
 [ 0.44810668  0.55189335]
 [ 0.07944944  0.92055053]
 [ 0.15852693  0.84147304]
 [ 0.09698548  0.90301454]
 [ 0.02033444  0.97966558]
 [ 0.11660488  0.88339514]
 [ 0.50894761  0.49105233]
 [ 0.05148372  0.94851631]
 [ 0.08922724  0.9107728 ]
 [ 0.24015993  0.75984007]
 [ 0.09552396  0.90447599]
 [ 0.02841147  0.97158855]]
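
Each row of history is a two-class softmax, so the two columns of a row sum to one and the column totals printed above (4.4033 and 27.5967) add up to the batch size of 32. A quick check:

# Rows are softmax outputs: column 0 and column 1 are complementary probabilities.
assert np.allclose(history.sum(axis=1), 1.0, atol=1e-5)
print(history.sum(axis=0))   # should reproduce the 4.4033 / 27.5967 totals above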

In [24]:
results=history[:,0]
UsedGenomes=np.ones(Genomes.shape[0])    # mask: 1 = genome still selectable
results2=results*UsedGenomes
for i in range(10):
    results2=results*UsedGenomes
    results3=np.argmax(results2)         # best remaining genome
    print(results3)
    UsedGenomes[results3]=0              # exclude it from later picks


26
20
1
29
0
5
11
19
8
22
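
The masking loop is a greedy repeated argmax; assuming the scores are distinct, the same ten indices come straight out of np.argsort:

# Equivalent top-10 ranking in one call (ties may break differently than repeated argmax).
top10 = np.argsort(results)[::-1][:10]
print(top10)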

In [5]:
Images=[]
for i in range(150):
    sql = '''SELECT image
            FROM example_images
            WHERE id=? LIMIT 1'''
    cur.execute(sql,(i+1,))
    results=np.array(cur.fetchone())[0]
    image_duplicated=np.tile(results, (32,1,1,1))              # pair this frame with all 32 genome images
    history=model.predict([image_duplicated,results2],batch_size=16)
    if np.sum(history[:,0])>2 and np.sum(history[:,0])<30:     # keep frames where the 32 predictions are not nearly unanimous
        print(len(Images),np.sum(history[:,0]))
        Images.append(results)


0 4.4033
---------------------------------------------------------------------------
KeyboardInterrupt                         Traceback (most recent call last)
<ipython-input-5-36a70a04075b> in <module>()
      7     results=np.array(cur.fetchone())[0]
      8     image_duplicated=np.tile(results, (32,1,1,1))
----> 9     history=model.predict([image_duplicated,results2],batch_size=16)
     10     if np.sum(history[:,0])>2 and np.sum(history[:,0])<30:
     11         print(len(Images),np.sum(history[:,0]))

C:\ProgramData\Miniconda3\lib\site-packages\keras\engine\training.py in predict(self, x, batch_size, verbose, steps)
   1711         f = self.predict_function
   1712         return self._predict_loop(f, ins, batch_size=batch_size,
-> 1713                                   verbose=verbose, steps=steps)
   1714 
   1715     def train_on_batch(self, x, y,

C:\ProgramData\Miniconda3\lib\site-packages\keras\engine\training.py in _predict_loop(self, f, ins, batch_size, verbose, steps)
   1267                 else:
   1268                     ins_batch = _slice_arrays(ins, batch_ids)
-> 1269                 batch_outs = f(ins_batch)
   1270                 if not isinstance(batch_outs, list):
   1271                     batch_outs = [batch_outs]

C:\ProgramData\Miniconda3\lib\site-packages\keras\backend\tensorflow_backend.py in __call__(self, inputs)
   2271         updated = session.run(self.outputs + [self.updates_op],
   2272                               feed_dict=feed_dict,
-> 2273                               **self.session_kwargs)
   2274         return updated[:len(self.outputs)]
   2275 

C:\ProgramData\Miniconda3\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    776     try:
    777       result = self._run(None, fetches, feed_dict, options_ptr,
--> 778                          run_metadata_ptr)
    779       if run_metadata:
    780         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

C:\ProgramData\Miniconda3\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    980     if final_fetches or final_targets:
    981       results = self._do_run(handle, final_targets, final_fetches,
--> 982                              feed_dict_string, options, run_metadata)
    983     else:
    984       results = []

C:\ProgramData\Miniconda3\lib\site-packages\tensorflow\python\client\session.py in _do_run(self, handle, target_list, fetch_list, feed_dict, options, run_metadata)
   1030     if handle is None:
   1031       return self._do_call(_run_fn, self._session, feed_dict, fetch_list,
-> 1032                            target_list, options, run_metadata)
   1033     else:
   1034       return self._do_call(_prun_fn, self._session, handle, feed_dict,

C:\ProgramData\Miniconda3\lib\site-packages\tensorflow\python\client\session.py in _do_call(self, fn, *args)
   1037   def _do_call(self, fn, *args):
   1038     try:
-> 1039       return fn(*args)
   1040     except errors.OpError as e:
   1041       message = compat.as_text(e.message)

C:\ProgramData\Miniconda3\lib\site-packages\tensorflow\python\client\session.py in _run_fn(session, feed_dict, fetch_list, target_list, options, run_metadata)
   1019         return tf_session.TF_Run(session, options,
   1020                                  feed_dict, fetch_list, target_list,
-> 1021                                  status, run_metadata)
   1022 
   1023     def _prun_fn(session, handle, feed_dict, fetch_list):

KeyboardInterrupt: 
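
The scan was stopped by hand, which is why the cell ends in a KeyboardInterrupt traceback. A hedged pattern for re-running it so the partially filled Images list survives; scan_loop is a hypothetical stand-in for the body of the cell above:

# Catch the manual interrupt so the partial results are kept instead of raising.
# scan_loop() is hypothetical shorthand for the loop in the previous cell.
try:
    scan_loop()
except KeyboardInterrupt:
    print('Interrupted after collecting', len(Images), 'images')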

In [111]:
Image.fromarray(Images[2],'RGB')


Out[111]:
