loading

This Plugin Crashed!

Error: Error: must not create an existing file {"type":"CREATE_FILE","wid":"0.22441747331493445","path":"main.py","file":{"path":"main.py","content":{"asEncoding":{"base64":"aW1wb3J0IG51bXB5IGFzIG5wCgojIFggPSAoaG91cnMgc3R1ZHlpbmcsIGhvdXJzIHNsZWVwaW5nKSwgeSA9IHNjb3JlIG9uIHRlc3QKeEFsbCA9IG5wLmFycmF5KChbMiwgOV0sIFsxLCA1XSwgWzMsIDZdLCBbNSwgMTBdKSwgZHR5cGU9ZmxvYXQpICMgaW5wdXQgZGF0YQp5ID0gbnAuYXJyYXkoKFs5Ml0sIFs4Nl0sIFs4OV0pLCBkdHlwZT1mbG9hdCkgIyBvdXRwdXQKCiMgc2NhbGUgdW5pdHMKeEFsbCA9IHhBbGwvbnAuYW1heCh4QWxsLCBheGlzPTApICMgc2NhbGluZyBpbnB1dCBkYXRhCnkgPSB5LzEwMCAjIHNjYWxpbmcgb3V0cHV0IGRhdGEgKG1heCB0ZXN0IHNjb3JlIGlzIDEwMCkKCiMgc3BsaXQgZGF0YQpYID0gbnAuc3BsaXQoeEFsbCwgWzNdKVswXSAjIHRyYWluaW5nIGRhdGEKeFByZWRpY3RlZCA9IG5wLnNwbGl0KHhBbGwsIFszXSlbMV0gIyB0ZXN0aW5nIGRhdGEKCnkgPSBucC5hcnJheSgoWzkyXSwgWzg2XSwgWzg5XSksIGR0eXBlPWZsb2F0KQp5ID0geS8xMDAgIyBtYXggdGVzdCBzY29yZSBpcyAxMDAKCmNsYXNzIE5ldXJhbF9OZXR3b3JrKG9iamVjdCk6CiAgZGVmIF9faW5pdF9fKHNlbGYpOgogICNwYXJhbWV0ZXJzCiAgICBzZWxmLmlucHV0U2l6ZSA9IDIKICAgIHNlbGYub3V0cHV0U2l6ZSA9IDEKICAgIHNlbGYuaGlkZGVuU2l6ZSA9IDMKCiAgI3dlaWdodHMKICAgIHNlbGYuVzEgPSBucC5yYW5kb20ucmFuZG4oc2VsZi5pbnB1dFNpemUsIHNlbGYuaGlkZGVuU2l6ZSkgIyAoM3gyKSB3ZWlnaHQgbWF0cml4IGZyb20gaW5wdXQgdG8gaGlkZGVuIGxheWVyCiAgICBzZWxmLlcyID0gbnAucmFuZG9tLnJhbmRuKHNlbGYuaGlkZGVuU2l6ZSwgc2VsZi5vdXRwdXRTaXplKSAjICgzeDEpIHdlaWdodCBtYXRyaXggZnJvbSBoaWRkZW4gdG8gb3V0cHV0IGxheWVyCgogIGRlZiBmb3J3YXJkKHNlbGYsIFgpOgogICAgI2ZvcndhcmQgcHJvcGFnYXRpb24gdGhyb3VnaCBvdXIgbmV0d29yawogICAgc2VsZi56ID0gbnAuZG90KFgsIHNlbGYuVzEpICMgZG90IHByb2R1Y3Qgb2YgWCAoaW5wdXQpIGFuZCBmaXJzdCBzZXQgb2YgM3gyIHdlaWdodHMKICAgIHNlbGYuejIgPSBzZWxmLnNpZ21vaWQoc2VsZi56KSAjIGFjdGl2YXRpb24gZnVuY3Rpb24KICAgIHNlbGYuejMgPSBucC5kb3Qoc2VsZi56Miwgc2VsZi5XMikgIyBkb3QgcHJvZHVjdCBvZiBoaWRkZW4gbGF5ZXIgKHoyKSBhbmQgc2Vjb25kIHNldCBvZiAzeDEgd2VpZ2h0cwogICAgbyA9IHNlbGYuc2lnbW9pZChzZWxmLnozKSAjIGZpbmFsIGFjdGl2YXRpb24gZnVuY3Rpb24KICAgIHJldHVybiBvCgogIGRlZiBzaWdtb2lkKHNlbGYsIHMpOgogICAgIyBhY3RpdmF0aW9uIGZ1bmN0aW9uCiAgICByZXR1cm4gMS8oMStucC5leHAoLXMpKQ
oKICBkZWYgc2lnbW9pZFByaW1lKHNlbGYsIHMpOgogICAgI2Rlcml2YXRpdmUgb2Ygc2lnbW9pZAogICAgcmV0dXJuIHMgKiAoMSAtIHMpCgogIGRlZiBiYWNrd2FyZChzZWxmLCBYLCB5LCBvKToKICAgICMgYmFja3dhcmQgcHJvcGFnYXRlIHRocm91Z2ggdGhlIG5ldHdvcmsKICAgIHNlbGYub19lcnJvciA9IHkgLSBvICMgZXJyb3IgaW4gb3V0cHV0CiAgICBzZWxmLm9fZGVsdGEgPSBzZWxmLm9fZXJyb3Iqc2VsZi5zaWdtb2lkUHJpbWUobykgIyBhcHBseWluZyBkZXJpdmF0aXZlIG9mIHNpZ21vaWQgdG8gZXJyb3IKCiAgICBzZWxmLnoyX2Vycm9yID0gc2VsZi5vX2RlbHRhLmRvdChzZWxmLlcyLlQpICMgejIgZXJyb3I6IGhvdyBtdWNoIG91ciBoaWRkZW4gbGF5ZXIgd2VpZ2h0cyBjb250cmlidXRlZCB0byBvdXRwdXQgZXJyb3IKICAgIHNlbGYuejJfZGVsdGEgPSBzZWxmLnoyX2Vycm9yKnNlbGYuc2lnbW9pZFByaW1lKHNlbGYuejIpICMgYXBwbHlpbmcgZGVyaXZhdGl2ZSBvZiBzaWdtb2lkIHRvIHoyIGVycm9yCgogICAgc2VsZi5XMSArPSBYLlQuZG90KHNlbGYuejJfZGVsdGEpICMgYWRqdXN0aW5nIGZpcnN0IHNldCAoaW5wdXQgLS0+IGhpZGRlbikgd2VpZ2h0cwogICAgc2VsZi5XMiArPSBzZWxmLnoyLlQuZG90KHNlbGYub19kZWx0YSkgIyBhZGp1c3Rpbmcgc2Vjb25kIHNldCAoaGlkZGVuIC0tPiBvdXRwdXQpIHdlaWdodHMKCiAgZGVmIHRyYWluKHNlbGYsIFgsIHkpOgogICAgbyA9IHNlbGYuZm9yd2FyZChYKQogICAgc2VsZi5iYWNrd2FyZChYLCB5LCBvKQoKICBkZWYgc2F2ZVdlaWdodHMoc2VsZik6CiAgICBucC5zYXZldHh0KCJ3MS50eHQiLCBzZWxmLlcxLCBmbXQ9IiVzIikKICAgIG5wLnNhdmV0eHQoIncyLnR4dCIsIHNlbGYuVzIsIGZtdD0iJXMiKQoKICBkZWYgcHJlZGljdChzZWxmKToKICAgIHByaW50ICgiUHJlZGljdGVkIGRhdGEgYmFzZWQgb24gdHJhaW5lZCB3ZWlnaHRzOiAiKQogICAgcHJpbnQgKCJJbnB1dCAoc2NhbGVkKTogXG4iICsgc3RyKHhQcmVkaWN0ZWQpKQogICAgcHJpbnQgKCJPdXRwdXQ6IFxuIiArIHN0cihzZWxmLmZvcndhcmQoeFByZWRpY3RlZCkpKQoKTk4gPSBOZXVyYWxfTmV0d29yaygpCmZvciBpIGluIHJhbmdlKDEwMDApOiAjIHRyYWlucyB0aGUgTk4gMSwwMDAgdGltZXMKICBwcmludCAoIiMgIiArIHN0cihpKSArICJcbiIpCiAgcHJpbnQgKCJJbnB1dCAoc2NhbGVkKTogXG4iICsgc3RyKFgpKQogIHByaW50ICgiQWN0dWFsIE91dHB1dDogXG4iICsgc3RyKHkpKQogIHByaW50ICgiUHJlZGljdGVkIE91dHB1dDogXG4iICsgc3RyKE5OLmZvcndhcmQoWCkpKQogIHByaW50ICgiTG9zczogXG4iICsgc3RyKG5wLm1lYW4obnAuc3F1YXJlKHkgLSBOTi5mb3J3YXJkKFgpKSkpKSAjIG1lYW4gc3VtIHNxdWFyZWQgbG9zcwogIHByaW50ICgiXG4iKQogIE5OLnRyYWluKFgsIHkpCgpOTi5zYXZlV2VpZ2h0cygpCk5OLnByZWRpY3QoKQ=="},"asBuffer":null},"loaded":true}
}
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
import numpy as np

# Training data for a tiny score-prediction problem.
# X = (hours studying, hours sleeping), y = score on test
xAll = np.array(([2, 9], [1, 5], [3, 6], [5, 10]), dtype=float) # input data: 4 samples
y = np.array(([92], [86], [89]), dtype=float) # outputs for the 3 training samples only

# scale units so every feature lies in [0, 1]
xAll = xAll/np.amax(xAll, axis=0) # divide each column by its own max
y = y/100 # scaling output data (max test score is 100)

# split data: first 3 rows train the net, the remainder is held out for prediction
X = np.split(xAll, [3])[0] # training data
xPredicted = np.split(xAll, [3])[1] # testing data

# NOTE: the original script re-defined and re-scaled y here a second time with
# identical values; that dead duplication has been removed (net result is the same).

class Neural_Network(object):
  """A minimal 2-3-1 feedforward network trained by vanilla gradient descent.

  Architecture: 2 inputs -> 3 sigmoid hidden units -> 1 sigmoid output.
  Weights are updated in place by backward(); there is no learning-rate
  parameter (effectively a learning rate of 1, as in the original tutorial).
  """

  def __init__(self):
    # parameters (layer sizes)
    self.inputSize = 2
    self.outputSize = 1
    self.hiddenSize = 3

    # weights — NOTE: the original comments had these shapes transposed.
    self.W1 = np.random.randn(self.inputSize, self.hiddenSize) # (2x3) weight matrix from input to hidden layer
    self.W2 = np.random.randn(self.hiddenSize, self.outputSize) # (3x1) weight matrix from hidden to output layer

  def forward(self, X):
    """Forward-propagate X through the network and return the output.

    Intermediate activations are stored on self for use by backward().
    """
    self.z = np.dot(X, self.W1) # input times first weight matrix
    self.z2 = self.sigmoid(self.z) # hidden-layer activation
    self.z3 = np.dot(self.z2, self.W2) # hidden activation times second weight matrix
    o = self.sigmoid(self.z3) # final activation
    return o

  def sigmoid(self, s):
    """Logistic activation function, applied elementwise."""
    return 1/(1+np.exp(-s))

  def sigmoidPrime(self, s):
    """Derivative of the sigmoid, expressed in terms of its OUTPUT s.

    Callers must pass a value that is already a sigmoid output
    (as forward() produces), not a raw pre-activation.
    """
    return s * (1 - s)

  def backward(self, X, y, o):
    """Backpropagate the prediction error and update both weight matrices in place."""
    self.o_error = y - o # error in output
    self.o_delta = self.o_error*self.sigmoidPrime(o) # output-layer delta

    self.z2_error = self.o_delta.dot(self.W2.T) # hidden layer's share of the output error
    self.z2_delta = self.z2_error*self.sigmoidPrime(self.z2) # hidden-layer delta

    self.W1 += X.T.dot(self.z2_delta) # adjusting first set (input --> hidden) weights
    self.W2 += self.z2.T.dot(self.o_delta) # adjusting second set (hidden --> output) weights

  def train(self, X, y):
    """One training step: a forward pass followed by backpropagation."""
    o = self.forward(X)
    self.backward(X, y, o)

  def saveWeights(self):
    """Write both weight matrices to text files in the working directory."""
    np.savetxt("w1.txt", self.W1, fmt="%s")
    np.savetxt("w2.txt", self.W2, fmt="%s")

  def predict(self, x=None):
    """Print the network's prediction for x.

    x defaults to the module-level xPredicted, preserving the original
    zero-argument behavior; passing an array generalizes to any input.
    """
    if x is None:
      x = xPredicted # fall back to the global hold-out sample
    print ("Predicted data based on trained weights: ")
    print ("Input (scaled): \n" + str(x))
    print ("Output: \n" + str(self.forward(x)))

# Train the network, logging the loss each iteration, then persist and predict.
NN = Neural_Network()
for i in range(1000): # trains the NN 1,000 times
  o = NN.forward(X) # single forward pass reused below (original recomputed it twice)
  print ("# " + str(i) + "\n")
  print ("Input (scaled): \n" + str(X))
  print ("Actual Output: \n" + str(y))
  print ("Predicted Output: \n" + str(o))
  print ("Loss: \n" + str(np.mean(np.square(y - o)))) # mean sum squared loss
  print ("\n")
  NN.train(X, y) # runs its own forward pass, then backprop

NN.saveWeights()
NN.predict()