FML Experiment No 6


EXPERIMENT NO-6

AIM- To implement a Support Vector Machine (SVM) classifier and evaluate it using accuracy, precision, recall, F1-score, and a confusion matrix.

CODE-

# Import necessary libraries
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix
from sklearn.inspection import DecisionBoundaryDisplay
from sklearn.svm import SVC

# Load the dataset
data = pd.read_csv('Solar_radiation_classification.csv')

# Inspect the data
#print(data.head())

# Define feature columns and the target column
X = data.drop('Class', axis=1)  # Features (all columns except 'Class')
y = data['Class']               # Target ('Class' has 3 unique classes)

data['Class'].value_counts()

Class
Monitoring    576
Running       430
Inspecting    256
Name: count, dtype: int64

X.shape, y.shape
((1262, 22), (1262,))

# Split the dataset into training and testing sets (80% train, 20% test)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
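Since the class counts above are imbalanced (576/430/256), an optional variant is a stratified split, which preserves those proportions in both subsets; a minimal sketch of the same call with stratification (not used in the runs below):

# Optional: stratify=y keeps the Monitoring/Running/Inspecting ratio identical in train and test
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y)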

# Standardize the features (recommended for SVMs, which are sensitive to feature scales)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
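The scaler and classifier can also be chained in a Pipeline, which fits the scaler on the training data only and reuses its parameters at prediction time; this is an optional restructuring of the steps above, not part of the original experiment:

from sklearn.pipeline import make_pipeline

# Sketch: the pipeline fits the scaler on the training data, then reuses
# the stored mean/std when transforming any data passed to predict()
svm_pipeline = make_pipeline(StandardScaler(), SVC(kernel="linear", random_state=0))
svm_pipeline.fit(X_train, y_train)
pipeline_pred = svm_pipeline.predict(X_test)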

# Initialize the SVC classifier
sv_classifier = SVC(kernel="linear", random_state=0)

# Fit the model to the training data
sv_classifier.fit(X_train_scaled, y_train)

# Make predictions on the test data
y_pred = sv_classifier.predict(X_test_scaled)
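The linear kernel is only one choice; an RBF kernel with tuned C and gamma can fit non-linear class boundaries. A sketch of a small grid search (the parameter values are common starting points, not tuned for this dataset):

from sklearn.model_selection import GridSearchCV

# Illustrative grid; widen it if the best values land on an edge of the grid
param_grid = {'kernel': ['rbf'], 'C': [0.1, 1, 10], 'gamma': ['scale', 0.01, 0.1]}
grid = GridSearchCV(SVC(random_state=0), param_grid, cv=5, scoring='accuracy')
grid.fit(X_train_scaled, y_train)
print(grid.best_params_, grid.best_score_)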

import matplotlib.pyplot as plt

feature_names = [f'Feature {i+1}' for i in range(22)]  # Modify with actual feature names if available
# scikit-learn sorts string labels alphabetically, so target_names must follow that order
class_names = ['Inspecting', 'Monitoring', 'Running']

# Classification report for detailed metrics
print("\nClassification Report:")
print(classification_report(y_test, y_pred, target_names=class_names))

# Evaluate the model
from sklearn.metrics import accuracy_score, recall_score, precision_score, f1_score

accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy:.4f}')

# macro: unweighted average of the per-class metrics; all classes are treated equally
recall = recall_score(y_test, y_pred, average='macro')
print(f'Recall: {recall:.4f}')

precision = precision_score(y_test, y_pred, average='macro')
print(f'Precision: {precision:.4f}')

f1 = f1_score(y_test, y_pred, average='macro')
print(f'F1-Score: {f1:.4f}')

Accuracy: 0.9723
Recall: 0.9776
Precision: 0.9704
F1-Score: 0.9738

# weighted: accounts for class imbalance by weighting each class's contribution
# by its support (the number of true instances of that class)
accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy:.4f}')

recall = recall_score(y_test, y_pred, average='weighted')
print(f'Recall: {recall:.4f}')

precision = precision_score(y_test, y_pred, average='weighted')
print(f'Precision: {precision:.4f}')

f1 = f1_score(y_test, y_pred, average='weighted')
print(f'F1-Score: {f1:.4f}')

Accuracy: 0.9723
Recall: 0.9723
Precision: 0.9725
F1-Score: 0.9723
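To make the macro/weighted distinction concrete, both averages can be recomputed by hand from the per-class recalls and class supports; a short sketch (the per-class values are computed from the predictions, not assumed):

import numpy as np

# Per-class recall in alphabetical label order, then the two averages by hand
per_class_recall = recall_score(y_test, y_pred, average=None)
supports = pd.Series(y_test).value_counts().sort_index().to_numpy()

macro_recall = per_class_recall.mean()                            # every class counts equally
weighted_recall = np.average(per_class_recall, weights=supports)  # larger classes count more
print(f'macro={macro_recall:.4f}, weighted={weighted_recall:.4f}')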

print('Confusion Matrix:')
print(confusion_matrix(y_test, y_pred))

Confusion Matrix:
[[ 54   0   0]
 [  2 110   3]
 [  0   2  82]]
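The raw matrix is easier to read as a labelled heatmap; rows and columns follow the alphabetical class order (Inspecting, Monitoring, Running). A sketch using scikit-learn's ConfusionMatrixDisplay:

from sklearn.metrics import ConfusionMatrixDisplay

# Heatmap with class labels on both axes; rows = true class, columns = predicted class
ConfusionMatrixDisplay.from_predictions(y_test, y_pred)
plt.title('SVM (linear kernel) confusion matrix')
plt.show()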

print('Classification Report:')
print(classification_report(y_test, y_pred))
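DecisionBoundaryDisplay is imported at the top but never used; a decision boundary can only be drawn in two dimensions, so a sketch would refit the SVM on just the first two scaled features (an arbitrary choice for illustration; this 2-feature model will not match the full model's accuracy):

# Refit on two features only, purely for visualization
X2_train = X_train_scaled[:, :2]
svc_2d = SVC(kernel="linear", random_state=0).fit(X2_train, y_train)

disp = DecisionBoundaryDisplay.from_estimator(
    svc_2d, X2_train, response_method="predict", alpha=0.4)
disp.ax_.scatter(X2_train[:, 0], X2_train[:, 1],
                 c=pd.factorize(y_train)[0], edgecolor='k', s=15)
plt.xlabel(feature_names[0])
plt.ylabel(feature_names[1])
plt.show()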
