@shamdasani/DigitRecognition

Python 2.7

No description

Files
  • main.py
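
main.py trains a small feed-forward neural network (20 inputs, one 20-unit hidden layer, and a single sigmoid output) to map 4x5 pixel digit bitmaps to the labels 0 through 3.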

import numpy as np

zero = [
  0, 1, 1, 0,
  1, 0, 0, 1,
  1, 0, 0, 1,
  1, 0, 0, 1,
  0, 1, 1, 0
]

one = [
  0, 0, 1, 0,
  0, 0, 1, 0,
  0, 0, 1, 0,
  0, 0, 1, 0,
  0, 0, 1, 0
]

two = [
  0, 1, 1, 0,
  1, 0, 0, 1,
  0, 0, 1, 0,
  0, 1, 0, 0,
  1, 1, 1, 1
]

three = [
  1, 1, 1, 1,
  0, 0, 0, 1,
  0, 1, 1, 1,
  0, 0, 0, 1,
  1, 1, 1, 1
]


predict = [
 0, 1, 1, 0,
 1, 0, 0, 1,
 0, 0, 1, 0,
 0, 1, 0, 0,
 1, 1, 1, 1
]
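
# note: each digit above is a 4x5 pixel bitmap flattened row by row into 20
# values, and `predict` holds the same bitmap as `two`, so a trained network
# should output a value close to 2 for it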

X = np.array((zero, one, two, three), dtype=float)
y = np.array(([0], [1], [2], [3]), dtype=float)
xPredicted = np.array(predict, dtype=float)

# scale labels: y in {0, 1, 2, 3} -> [0, 1] so the sigmoid output can represent it
y = y/3

class Neural_Network(object):
  def __init__(self):
    #parameters
    self.inputSize = 20  # one input per pixel of the 4x5 bitmap
    self.outputSize = 1  # a single scaled label in [0, 1]
    self.hiddenSize = 20

    #weights
    self.W1 = np.random.randn(self.inputSize, self.hiddenSize) # (20x20) weight matrix from input to hidden layer
    self.W2 = np.random.randn(self.hiddenSize, self.outputSize) # (20x1) weight matrix from hidden to output layer

  def forward(self, X):
    #forward propagation through our network
    self.z = np.dot(X, self.W1) # dot product of X (input) and first set of 20x20 weights
    self.z2 = self.sigmoid(self.z) # activation function
    self.z3 = np.dot(self.z2, self.W2) # dot product of hidden layer (z2) and second set of 20x1 weights
    o = self.sigmoid(self.z3) # final activation function
    return o

  def sigmoid(self, s):
    # activation function
    return 1/(1+np.exp(-s))

  def sigmoidPrime(self, s):
    # derivative of sigmoid, written in terms of the sigmoid's own output:
    # if s = sigmoid(x), then d(sigmoid)/dx = s * (1 - s)
    return s * (1 - s)

  def backward(self, X, y, o):
    # backward propagate through the network
    self.o_error = y - o # error in output
    self.o_delta = self.o_error*self.sigmoidPrime(o) # applying derivative of sigmoid to error

    self.z2_error = self.o_delta.dot(self.W2.T) # z2 error: how much our hidden layer weights contributed to output error
    self.z2_delta = self.z2_error*self.sigmoidPrime(self.z2) # applying derivative of sigmoid to z2 error

    self.W1 += X.T.dot(self.z2_delta) # adjusting first set (input --> hidden) weights
    self.W2 += self.z2.T.dot(self.o_delta) # adjusting second set (hidden --> output) weights
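    # note: these updates apply the raw gradients directly, i.e. an implicit
    # learning rate of 1; a real implementation would usually scale the
    # gradients by a small step size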

  def train(self, X, y):
    o = self.forward(X)
    self.backward(X, y, o)

  def predict(self):
    o = self.forward(xPredicted) * 3 # scale the 0-1 output back to a 0-3 label
    print "Predicted data based on trained weights: "
    print "Input: \n" + str(xPredicted)
    print "Output: \n" + str(o)
    print "Rounded Output: \n" + str(np.round(o))

NN = Neural_Network()
for i in xrange(10000): # trains the NN 10,000 times
  print "#" + str(i) + "\n"
  print "Input: \n" + str(X)
  print "Actual Output: \n" + str(y*3)
  print "Predicted Output: \n" + str(NN.forward(X)*3)
  print "Loss: \n" + str(np.mean(np.square(y - NN.forward(X)))) # mean sum squared loss
  print "\n"
  NN.train(X, y)

NN.predict()
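
# A minimal sketch (not part of the original file): score every training digit
# by rounding the scaled network output back to a 0-3 label.
for digit, label in zip(X, y):
  guess = int(np.round(float(NN.forward(digit)) * 3))
  print "label " + str(int(label[0] * 3)) + " -> predicted " + str(guess)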