In [2]:
import pandas as pd
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
In [11]:
historic = pd.read_csv("2016_2.csv", header=0) #2016_2.csv is the heavily cut version of 2016.csv
rain = pd.read_csv("2016_labels.csv", header=0)
rain2 = pd.get_dummies(rain) #one-hot encode the labels so they match the two-unit softmax output below
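A minimal sketch of how the dataframes might be turned into arrays for the placeholders defined below. The choice of the first six numeric columns and the 80/20 split are assumptions, since the feature columns to keep have not been decided yet.
# Hypothetical data-prep sketch (assumes the first six numeric columns of historic are the features)
features = historic.select_dtypes(include=[np.number]).iloc[:, :6].values.astype(np.float32)
labels = rain2.values.astype(np.float32)
split = int(0.8 * len(features))  # simple chronological 80/20 split
train_x, test_x = features[:split], features[split:]
train_y, test_y = labels[:split], labels[split:]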
In [12]:
rain
Out[12]:
In [5]:
#as before, x contains the data of the training set (need to decide which columns should be kept)
x = tf.placeholder(tf.float32, [None, 6])
n = 2 #number of days back the algorithm can see (not currently implemented; a possible approach is sketched after this cell)
#weights for the two states, precipitation vs dry (could easily add an additional state for rain vs snow)
W = tf.Variable(tf.zeros([6, 2]))
b = tf.Variable(tf.zeros([2]))
#predicted precipitation result
y = tf.nn.softmax(tf.matmul(x, W) + b)
#actual result
y_ = tf.placeholder(tf.float32, [None, 2])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
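One possible way to implement the "n days back" idea is to append lagged copies of the feature columns to each row. This is only a sketch under the assumption that each row of historic is one day; the helper name and suffixes are placeholders.
# Hypothetical lag-feature sketch: each row also carries the previous n_days of observations
def add_lags(df, n_days):
    lagged = [df]
    for lag in range(1, n_days + 1):
        lagged.append(df.shift(lag).add_suffix("_lag%d" % lag))
    return pd.concat(lagged, axis=1).dropna()  # drop the first n_days rows, which have no full history
# historic_lagged = add_lags(historic.select_dtypes(include=[np.number]), n)
# the labels would need the same leading rows dropped, and the x placeholder would become [None, 6 * (n + 1)]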
In [ ]:
#The actual training is defined here. The algorithm being used is gradient descent, although this may be changed
#depending on user preference/the task at hand.
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
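The training loop for this dataset has not been written yet. A minimal sketch, assuming the train_x/train_y and test_x/test_y arrays from the data-prep sketch above; full-batch updates and 1000 steps are arbitrary choices, not tuned values.
# Hypothetical training-loop sketch for the weather data
for i in range(1000):
    sess.run(train_step, feed_dict={x: train_x, y_: train_y})
# rough accuracy check on the held-out portion
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: test_x, y_: test_y}))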
In [6]:
historic
Out[6]:
Code from a past tutorial (the TensorFlow MNIST softmax example), take from it as needed
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)  # MNIST data with one-hot labels
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)
for i in range(100000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print(sess.run(accuracy, feed_dict={x: mnist.test.images, y_: mnist.test.labels}))
In [15]:
tf.reduce_sum(x)
Out[15]:
In [ ]: