In [24]:
"""
Minimal character-level Vanilla RNN model. Written by Andrej Karpathy (@karpathy)
BSD License
"""
import numpy as np
# data I/O
data = open('shige1.txt', 'r', encoding='utf-8').read() # should be a simple plain-text file
#print(data)
chars = list(set(data))
#print(chars)
data_size, vocab_size = len(data), len(chars)
print('data has %d characters, %d unique.' % (data_size, vocab_size))


data has 4422 characters, 1059 unique.

In [31]:
char_to_ix = { ch:i for i,ch in enumerate(chars) }
ix_to_char = { i:ch for i,ch in enumerate(chars) }
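
# --- quick sanity check (not part of the original script): the two lookup
# --- tables should be exact inverses of each other over the vocabulary.
assert all(ix_to_char[char_to_ix[ch]] == ch for ch in chars)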

# hyperparameters
hidden_size = 100 # size of hidden layer of neurons
seq_length = 25 # number of steps to unroll the RNN for
learning_rate = 1e-1

# model parameters
Wxh = np.random.randn(hidden_size, vocab_size)*0.01 # input to hidden
Whh = np.random.randn(hidden_size, hidden_size)*0.01 # hidden to hidden
Why = np.random.randn(vocab_size, hidden_size)*0.01 # hidden to output
bh = np.zeros((hidden_size, 1)) # hidden bias
by = np.zeros((vocab_size, 1)) # output bias

def lossFun(inputs, targets, hprev):
  """
  inputs, targets are both lists of integers.
  hprev is Hx1 array of initial hidden state
  returns the loss, gradients on model parameters, and last hidden state
  """
  xs, hs, ys, ps = {}, {}, {}, {}
  hs[-1] = np.copy(hprev)
  loss = 0
  # forward pass
  for t in range(len(inputs)):
    xs[t] = np.zeros((vocab_size,1)) # encode in 1-of-k representation
    xs[t][inputs[t]] = 1
    hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t-1]) + bh) # hidden state
    ys[t] = np.dot(Why, hs[t]) + by # unnormalized log probabilities for next chars
    ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t])) # probabilities for next chars
    loss += -np.log(ps[t][targets[t],0]) # softmax (cross-entropy loss)
  # backward pass: compute gradients going backwards
  dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
  dbh, dby = np.zeros_like(bh), np.zeros_like(by)
  dhnext = np.zeros_like(hs[0])
  for t in reversed(range(len(inputs))):
    dy = np.copy(ps[t])
    dy[targets[t]] -= 1 # backprop into y. see http://cs231n.github.io/neural-networks-case-study/#grad if confused here
    dWhy += np.dot(dy, hs[t].T)
    dby += dy
    dh = np.dot(Why.T, dy) + dhnext # backprop into h
    dhraw = (1 - hs[t] * hs[t]) * dh # backprop through tanh nonlinearity
    dbh += dhraw
    dWxh += np.dot(dhraw, xs[t].T)
    dWhh += np.dot(dhraw, hs[t-1].T)
    dhnext = np.dot(Whh.T, dhraw)
  for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
    np.clip(dparam, -5, 5, out=dparam) # clip to mitigate exploding gradients
  return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
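
# --- optional sanity check (not part of the original script): numerically verify
# --- the analytic gradients from lossFun on a few randomly chosen parameter
# --- entries. gradCheck is an illustrative name; note that lossFun clips its
# --- gradients at +/-5, so only unclipped entries are expected to match closely.
def gradCheck(inputs, targets, hprev, num_checks=5, delta=1e-5):
  _, dWxh, dWhh, dWhy, dbh, dby, _ = lossFun(inputs, targets, hprev)
  for param, dparam, name in zip([Wxh, Whh, Why, bh, by],
                                 [dWxh, dWhh, dWhy, dbh, dby],
                                 ['Wxh', 'Whh', 'Why', 'bh', 'by']):
    for _ in range(num_checks):
      ri = np.random.randint(param.size)
      old_val = param.flat[ri]
      param.flat[ri] = old_val + delta
      loss_plus = lossFun(inputs, targets, hprev)[0]
      param.flat[ri] = old_val - delta
      loss_minus = lossFun(inputs, targets, hprev)[0]
      param.flat[ri] = old_val # restore original value
      grad_numerical = (loss_plus - loss_minus) / (2 * delta)
      grad_analytic = dparam.flat[ri]
      rel_error = abs(grad_analytic - grad_numerical) / max(abs(grad_analytic) + abs(grad_numerical), 1e-12)
      print('%s: analytic %e, numerical %e, relative error %e' % (name, grad_analytic, grad_numerical, rel_error))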

def sample(h, seed_ix, n):
  """ 
  sample a sequence of integers from the model 
  h is memory state, seed_ix is seed letter for first time step
  """
  x = np.zeros((vocab_size, 1))
  x[seed_ix] = 1
  ixes = []
  for t in range(n):
    h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
    y = np.dot(Why, h) + by
    p = np.exp(y) / np.sum(np.exp(y))
    ix = np.random.choice(range(vocab_size), p=p.ravel())
    x = np.zeros((vocab_size, 1))
    x[ix] = 1
    ixes.append(ix)
  return ixes
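
# --- optional variant (not part of the original script): sampling with a softmax
# --- temperature. Values below 1.0 sharpen the distribution and give more
# --- conservative samples; sample_with_temperature is an illustrative name.
def sample_with_temperature(h, seed_ix, n, temperature=1.0):
  x = np.zeros((vocab_size, 1))
  x[seed_ix] = 1
  ixes = []
  for t in range(n):
    h = np.tanh(np.dot(Wxh, x) + np.dot(Whh, h) + bh)
    y = (np.dot(Why, h) + by) / temperature # scale logits before the softmax
    p = np.exp(y - np.max(y)) / np.sum(np.exp(y - np.max(y))) # numerically stable softmax
    ix = np.random.choice(range(vocab_size), p=p.ravel())
    x = np.zeros((vocab_size, 1))
    x[ix] = 1
    ixes.append(ix)
  return ixes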

n, p = 0, 0
mWxh, mWhh, mWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
mbh, mby = np.zeros_like(bh), np.zeros_like(by) # memory variables for Adagrad
smooth_loss = -np.log(1.0/vocab_size)*seq_length # loss at iteration 0
for _ in range(2001): # run 2001 iterations; n below tracks the iteration count
  # prepare inputs (we're sweeping from left to right in steps seq_length long)
  if p+seq_length+1 >= len(data) or n == 0: 
    hprev = np.zeros((hidden_size,1)) # reset RNN memory
    p = 0 # go from start of data
  inputs = [char_to_ix[ch] for ch in data[p:p+seq_length]]
  targets = [char_to_ix[ch] for ch in data[p+1:p+seq_length+1]]

  # sample from the model now and then
  if n % 500 == 0:
    sample_ix = sample(hprev, inputs[0], 200)
    txt = ''.join(ix_to_char[ix] for ix in sample_ix)
    print('----\n %s \n----' % (txt, ))

  # forward seq_length characters through the net and fetch gradient
  loss, dWxh, dWhh, dWhy, dbh, dby, hprev = lossFun(inputs, targets, hprev)
  smooth_loss = smooth_loss * 0.999 + loss * 0.001
  if n % 500 == 0: print('iter %d, loss: %f' % (n, smooth_loss)) # print progress
  
  # perform parameter update with Adagrad
  for param, dparam, mem in zip([Wxh, Whh, Why, bh, by], 
                                [dWxh, dWhh, dWhy, dbh, dby], 
                                [mWxh, mWhh, mWhy, mbh, mby]):
    mem += dparam * dparam
    param += -learning_rate * dparam / np.sqrt(mem + 1e-8) # adagrad update

  p += seq_length # move data pointer
  n += 1 # iteration counter


----
 端汉洲良晖偶冬佳官田销尝户二文数以弓鬓牛停波戍却尽停寒徒顾搔岭琴满凄谢色斗流松浦陪妇帷投谙语扫岐属自频遍说土心沉烦沾京徐傍开出灶侵圣细鸳苹陛留兵平物层姓达永碎散旧没那念知额只迥者芬潮疑隔汉东线傅陛食鸿宗度姓休密惹潺弹度问厨正寐昔漠曙扫哭苹碑桂各出涯瘴红无天期篇宦竹周费林鸿桑同儿枫霞漠九棋坤盈怅折承火松角仍迷曲雁楫邑绮亦钓念公投望珑益火坼闲胡借觅妓鱼迹屋遁撷如恋共法远柳谢把姑酒苍涕复风况床授悦秦羡 
----
iter 0, loss: 174.127009
----
 来人孙坟邀满
久冬际微秦翁

无短三人城春凛昨啸宝干内片谈近身物头草分来层
筝无杖火情亲住
淮不树洲六啼前沧山鹃比伴魂各
总光城鼓灯
得里古愁
寞泊见帆钟迹诗流息弄旷无王达石制迷舒师风轩宿姑妇搴孙更
明
骑节当蜀接无声盈夕花黍
悲杯青戍混下叶已壁帐涕旦荣若想讴舟红语堆撷
生魏馆戌生有断各灯钓识鱼音
集说别命下细愧愁
情铢虚迷短雄桂白与又乡客禽自妓几酒旧故乱今新洞君晓寝鸣孤晓自魑剑邀两支残丘数径怅内 
----
iter 500, loss: 180.810555
----
 峰蝉照空寺时入
袜陇江塘多堪雪
汉来取
偶百长暮驻留
代还独湖家万迟荒夜
忘静邻古草扉寺浣策风蜀青林长无朝旧风高霞魑花浮襄流红戌童故落中
女月汀立公扉夜儿乱长郎相遣春响云竟长皆落上曲
谢新过夜斑莓徒连心径长晨天虚声
鼓何辅然撷桑河浮正堪恋
毒三竹
鸟永何东
桂树汤汉听野无中世独古预残静北但双春行阻迢上亲日寻叶十亦朝早捣暝帐空万
情有里夜前
晨怯本长夜照郢采
中暗绕
日济空叶襄移征郊
甲外住青长兴居 
----
iter 1000, loss: 173.759436
----
 心山江不竟里苍尽复前
白心扉
过醉忘里
东无
清钓篁日上秋风
空舟远野小花敢人恋
城采国客原水逼过来外树纷惜
魂清迟
初在树更天首山
自家物归慵灯徐声晚故黯随不共园白颜寞八与杯长歇覆深先姓寄日
樯日额杳敝去买圣来
日解年
洗峰为入秋吾惟雨
橦卧须
残亲行销关
水颜峰安
泠阻吞外寥冷散故樵
事此见生事处飘坤月陲传鼓驻惜亦风有此陌秋惆相
数石孙江空黄看空旧听楼
幽滋策
坟柱与
如投弄细秋
歌舞月秋惜妾 
----
iter 1500, loss: 165.554750
----
 惊住
骨欲安高
种西
孤兰星迟径大子人情在庐楚一声独浑园蝶不所逢
乡郎浑山长
阔白病乱雁得如
折全适江鼓惹茨此月面涵肉取禅玉嫌遣来惊峰
可园下岂临残
帆过返
沧心应覆阙惊令悬烽幽调
仗轩鸿阳晚梦五故陛春游去清鹤梧乡复天壑投故乡惊客可爱烦不游处钟
谁塞离里买归已一潮
切岂何白坐应眼蝴深授

歇穷派水孤所日
远闻砧儿
海夜故潭
觉罢岭春途此平
飞分鸣迟尽林土来芜入青粟谏满迎对心北樵林水塞怜年
至离月夜 
----
iter 2000, loss: 161.083531
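
Once the training loop above has run, the fitted parameters can be reused to generate a longer sample. A minimal sketch, not part of the original notebook, assuming the cell above has finished so that sample, hprev, data, char_to_ix and ix_to_char are all still in scope:

In [ ]:
# generate 400 characters from the trained model, seeding with the first
# character of the training text (illustrative only; output will differ per run)
seed_ix = char_to_ix[data[0]]
sample_ix = sample(np.copy(hprev), seed_ix, 400)
print(''.join(ix_to_char[ix] for ix in sample_ix))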

In [ ]: