@@ -29,7 +29,8 @@ def train(self, lr=0.1, input=None, L2_reg=0.00):
         if input is not None:
             self.x = input
 
-        p_y_given_x = sigmoid(numpy.dot(self.x, self.W) + self.b)
+        # p_y_given_x = sigmoid(numpy.dot(self.x, self.W) + self.b)
+        p_y_given_x = softmax(numpy.dot(self.x, self.W) + self.b)
         d_y = self.y - p_y_given_x
 
         self.W += lr * numpy.dot(self.x.T, d_y) - lr * L2_reg * self.W
@@ -39,7 +40,8 @@ def train(self, lr=0.1, input=None, L2_reg=0.00):
         # return cost
 
     def negative_log_likelihood(self):
-        sigmoid_activation = sigmoid(numpy.dot(self.x, self.W) + self.b)
+        # sigmoid_activation = sigmoid(numpy.dot(self.x, self.W) + self.b)
+        sigmoid_activation = softmax(numpy.dot(self.x, self.W) + self.b)
 
         cross_entropy = - numpy.mean(
             numpy.sum(self.y * numpy.log(sigmoid_activation) +
@@ -50,7 +52,8 @@ def negative_log_likelihood(self):
 
 
     def predict(self, x):
-        return sigmoid(numpy.dot(x, self.W) + self.b)
+        # return sigmoid(numpy.dot(x, self.W) + self.b)
+        return softmax(numpy.dot(x, self.W) + self.b)
 
 
 def test_lr(learning_rate=0.01, n_epochs=200):
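Note: softmax is not defined in this diff; it is assumed to come from the repository's shared utilities. A minimal, numerically stable sketch with the same call signature (an assumption for reference, not necessarily the repo's exact implementation):

import numpy

def softmax(x):
    # Assumed helper (not part of this commit): row-wise softmax.
    # Subtract each row's max before exponentiating to avoid overflow,
    # then normalize so every row sums to 1.
    x = numpy.atleast_2d(x)
    e = numpy.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

With a softmax activation, negative_log_likelihood above computes the usual multiclass cross-entropy, and predict returns a probability distribution over classes for each input row.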