Experiment 2
Objectives:
1- Understand the perceptron learning algorithm.
2- Implement the perceptron learning algorithm using Python.
3- Run real-world applications in Python.
Introduction:
The idea of a perceptron is analogous to the operating principle of the basic
processing unit of the brain, the neuron. A neuron is composed of many input
signals carried by dendrites, the cell body, and one output signal carried along
the axon. The neuron fires an action signal when the cell reaches a particular
threshold. This action either happens or it doesn't; there is no such thing as a
"partial" firing of a neuron. Similarly, the perceptron has many inputs (often called
features) that are fed into a linear unit that produces one binary output.
Therefore, perceptrons can be applied to binary classification problems, where
each sample is to be identified as belonging to one of two predefined classes.
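Concretely, the perceptron computes a weighted sum of its inputs plus a bias and passes it through a step function. A minimal sketch of that decision rule (the variable names here are illustrative, not part of the code below):

import numpy as np

def decide(x, w, b):
    # step activation: fire (1) if w.x + b >= 0, otherwise stay silent (0)
    return 1 if np.dot(w, x) + b >= 0 else 0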
The code:
Experiment 1:
import random
import numpy as np
from matplotlib import pyplot as plt

# random initial weights and bias (threshold)
w1 = random.uniform(-5, 5)
w2 = random.uniform(-5, 5)
threshold = random.uniform(-5, 5)

# AND-gate training set
training_x = np.asarray([[0, 0], [0, 1], [1, 0], [1, 1]])
out = np.array([0, 0, 0, 1])

# step activation: fire (1) when the net input is non-negative
def positive(number):
    if number >= 0:
        return 1
    else:
        return 0

# initial error for each training sample
error = np.array([0, 0, 0, 0])
for j in range(len(training_x)):
    check = positive(np.dot(np.asarray([w1, w2]), training_x[j]) + threshold)
    error[j] = out[j] - check
errornumber = np.sum(np.abs(error))  # count of misclassified samples

iterations = 1000
count = 1
eta = 0.1  # learning rate
values = [w1, w2, threshold]
while count < iterations and errornumber != 0:
    for j in range(len(training_x)):
        check = positive(np.dot(np.asarray([w1, w2]), training_x[j]) + threshold)
        error[j] = out[j] - check
        # perceptron update rule: w <- w + eta * error * x, bias <- bias + eta * error
        w1 = values[0] + eta * error[j] * training_x[j][0]
        w2 = values[1] + eta * error[j] * training_x[j][1]
        threshold = values[2] + eta * error[j]
        values = [w1, w2, threshold]
    errornumber = np.sum(np.abs(error))
    print("ERRORS: " + str(errornumber))
    count += 1
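As a quick sanity check (a minimal sketch, assuming the loop above has converged), the learned weights can be applied back to all four AND inputs:

# verify the learned AND gate: should print 0, 0, 0, 1
for x in training_x:
    print(x, positive(np.dot(np.asarray([w1, w2]), x) + threshold))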
# second run: OR gate
w1 = random.uniform(-5, 5)
w2 = random.uniform(-5, 5)
threshold = random.uniform(-5, 5)

training_x = np.asarray([[0, 0], [0, 1], [1, 0], [1, 1]])
out = np.array([0, 1, 1, 1])

def positive(number):
    if number >= 0:
        return 1
    else:
        return 0

error = np.array([0, 0, 0, 0])
for j in range(len(training_x)):
    check = positive(np.dot(np.asarray([w1, w2]), training_x[j]) + threshold)
    error[j] = out[j] - check
errornumber = np.sum(np.abs(error))

iterations = 1000
count = 1
eta = 0.1
values = [w1, w2, threshold]
while count < iterations and errornumber != 0:
    for j in range(len(training_x)):
        check = positive(np.dot(np.asarray([w1, w2]), training_x[j]) + threshold)
        error[j] = out[j] - check
        w1 = values[0] + eta * error[j] * training_x[j][0]
        w2 = values[1] + eta * error[j] * training_x[j][1]
        threshold = values[2] + eta * error[j]
        values = [w1, w2, threshold]
    errornumber = np.sum(np.abs(error))
    print("ERRORS: " + str(errornumber))
    count += 1

print("w1 " + str(values[0]) + " w2 " + str(values[1]) + " theta " + str(values[2]))
print("count " + str(count))

# points along the learned decision boundary w1*x + w2*y + threshold = 0
x_values = np.linspace(-0.5, 1.5, 100)
y_values = (-threshold - w1 * x_values) / w2
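The boundary computed above is never actually drawn in the original listing; a short, illustrative addition to plot it together with the OR training points might look like this:

# plot the OR training points and the learned decision boundary
plt.scatter(training_x[:, 0], training_x[:, 1], c=out)
plt.plot(x_values, y_values)
plt.xlabel("x1")
plt.ylabel("x2")
plt.show()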
Experiment 3:
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd

# import the Iris dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
                 header=None)
# rows 50-99 are all versicolor; keep columns 0 and 2 (sepal length & petal length)
versicolor = np.array(df.iloc[50:100, [0, 2]])
# rows 100-149 are all virginica; keep the same two features
virginica = np.array(df.iloc[100:150, [0, 2]])

# plot the points: blue o's denote versicolor, red x's denote virginica
plt.plot(versicolor[:, 0], versicolor[:, 1], "bo", label="Versicolor")
plt.plot(virginica[:, 0], virginica[:, 1], "rx", label="Virginica")
plt.xlabel("sepal length")
plt.ylabel("petal length")
plt.legend(loc='upper left')
plt.show()
class Perceptron():
    # initialize hyperparameters (learning rate and number of iterations)
    def __init__(self, eta=0.1, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        # randomly initialize the weights (index 0 holds the bias)
        self.w_ = np.array([random.uniform(-1.0, 1.0) for _ in range(1 + X.shape[1])])
        # keeps track of the number of errors per iteration for observation purposes
        self.errors_ = []
        # iterate over the labelled dataset, updating the weights for each feature accordingly
        for _ in range(self.n_iter):
            errors = 0
            for xi, label in zip(X, y):
                update = self.eta * (label - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    # compute the net input, i.e. the dot product of X and the weights plus the bias value
    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    # step activation: predict class 1 when the net input is non-negative, else class 0
    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, 0)
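With the class complete, a short usage sketch (illustrative only): it stacks the two classes plotted above into a training set labelled 0 and 1, fits the perceptron, and plots the per-iteration error count.

# build the training set from the two classes above
X = np.vstack((versicolor, virginica))
y = np.hstack((np.zeros(len(versicolor)), np.ones(len(virginica))))

clf = Perceptron(eta=0.1, n_iter=50).fit(X, y)
plt.plot(range(1, len(clf.errors_) + 1), clf.errors_)
plt.xlabel("iteration")
plt.ylabel("misclassifications")
plt.show()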
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd

# import the Iris dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
                 header=None)
# rows 50-99 are all versicolor; keep columns 2 and 3 (petal length & petal width)
versicolor = np.array(df.iloc[50:100, [2, 3]])
# rows 100-149 are all virginica; keep the same two features
virginica = np.array(df.iloc[100:150, [2, 3]])

# plot the points: blue o's denote versicolor, red x's denote virginica
plt.plot(versicolor[:, 0], versicolor[:, 1], "bo", label="Versicolor")
plt.plot(virginica[:, 0], virginica[:, 1], "rx", label="Virginica")
plt.xlabel("petal length")
plt.ylabel("petal width")
plt.legend(loc='upper left')
plt.show()
class Perceptron():
    # initialize hyperparameters (learning rate and number of iterations)
    def __init__(self, eta=0.1, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        # randomly initialize the weights (index 0 holds the bias)
        self.w_ = np.array([random.uniform(-1.0, 1.0) for _ in range(1 + X.shape[1])])
        # keeps track of the number of errors per iteration for observation purposes
        self.errors_ = []
        # iterate over the labelled dataset, updating the weights for each feature accordingly
        for _ in range(self.n_iter):
            errors = 0
            for xi, label in zip(X, y):
                update = self.eta * (label - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    # compute the net input, i.e. the dot product of X and the weights plus the bias value
    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    # step activation: predict class 1 when the net input is non-negative, else class 0
    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, 0)
import numpy as np
import matplotlib.pyplot as plt
import random
import pandas as pd

# import the Iris dataset
df = pd.read_csv('https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data',
                 header=None)
# the first 50 rows are all setosa; keep columns 0 and 2 (sepal length & petal length)
setosa = np.array(df.iloc[0:50, [0, 2]])
# rows 50-99 are all versicolor; keep the same two features
versicolor = np.array(df.iloc[50:100, [0, 2]])

# plot the points: blue o's denote setosa, red x's denote versicolor
plt.plot(setosa[:, 0], setosa[:, 1], "bo", label="Setosa")
plt.plot(versicolor[:, 0], versicolor[:, 1], "rx", label="Versicolor")
plt.xlabel("sepal length")
plt.ylabel("petal length")
plt.legend(loc='upper left')
plt.show()
class Perceptron():
    # initialize hyperparameters (learning rate and number of iterations)
    def __init__(self, eta=0.01, n_iter=50):
        self.eta = eta
        self.n_iter = n_iter

    def fit(self, X, y):
        # randomly initialize the weights (index 0 holds the bias)
        self.w_ = np.array([random.uniform(-1.0, 1.0) for _ in range(1 + X.shape[1])])
        # keeps track of the number of errors per iteration for observation purposes
        self.errors_ = []
        # iterate over the labelled dataset, updating the weights for each feature accordingly
        for _ in range(self.n_iter):
            errors = 0
            for xi, label in zip(X, y):
                update = self.eta * (label - self.predict(xi))
                self.w_[1:] += update * xi
                self.w_[0] += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    # compute the net input, i.e. the dot product of X and the weights plus the bias value
    def net_input(self, X):
        return np.dot(X, self.w_[1:]) + self.w_[0]

    # step activation: predict class 1 when the net input is non-negative, else class 0
    def predict(self, X):
        return np.where(self.net_input(X) >= 0.0, 1, 0)
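Setosa and versicolor are linearly separable in these two features, so the perceptron converges here. A brief, illustrative usage of this final version (the sample passed to predict is hypothetical):

X = np.vstack((setosa, versicolor))
y = np.hstack((np.zeros(len(setosa)), np.ones(len(versicolor))))

clf = Perceptron(eta=0.01, n_iter=50).fit(X, y)
print(clf.errors_)  # the error count should drop to 0 and stay there
print(clf.predict(np.array([5.0, 1.5])))  # hypothetical sample: sepal length 5.0, petal length 1.5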