FML Experiment No 6
CODE-
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC
from sklearn.metrics import (accuracy_score, precision_score, recall_score,
                             f1_score, confusion_matrix, classification_report)

# Load the dataset and inspect the class distribution
data = pd.read_csv('Solar_radiation_classification.csv')
#print(data.head())
data['Class'].value_counts()
Class
Monitoring 576
Running 430
Inspecting 256
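The counts above show a moderate class imbalance (Monitoring has more than twice the samples of Inspecting). A quick sketch, assuming the same data DataFrame, to view the class proportions directly:

# Relative frequency of each class; helps decide whether macro or
# weighted averaging of the metrics is more informative later on
print(data['Class'].value_counts(normalize=True).round(3))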
# Separate the features (X) from the target label (y); 'Class' is the label column
X = data.drop('Class', axis=1)
y = data['Class']
X.shape, y.shape
# Split the dataset into training and testing sets (80% train, 20% test)
# (a fixed random_state is assumed here for reproducibility)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Standardize the features (optional, but may improve performance for some models)
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)
X_test_scaled = scaler.transform(X_test)
# Train a support vector classifier (default RBF kernel) on the scaled features
sv_classifier = SVC()
sv_classifier.fit(X_train_scaled, y_train)
y_pred = sv_classifier.predict(X_test_scaled)
feature_names = [f'Feature {i+1}' for i in range(22)] # Modify with actual feature names if available
print("\nClassification Report:")
accuracy = accuracy_score(y_test, y_pred)
print(f'Accuracy: {accuracy:.4f}')
recall=recall_score(y_test, y_pred,average='macro')
# macro: unweighted average of the metric for each class; all classes are treated equally.
print(f'Recall: {recall:.4f}')
precision=precision_score(y_test, y_pred,average='macro')
print(f'Precision: {precision:.4f}')
f1=f1_score(y_test, y_pred,average='macro')
print(f'F1-Score: {f1:.4f}')
Accuracy: 0.9723
Recall: 0.9776
Precision: 0.9704
F1-Score: 0.9738
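As a sanity check on the macro average, the same value can be reproduced by taking the unweighted mean of the per-class recalls. A minimal sketch, assuming the y_test and y_pred arrays from above:

import numpy as np
from sklearn.metrics import recall_score

# Per-class recall, then an unweighted mean over classes;
# this should match recall_score(..., average='macro')
per_class_recall = recall_score(y_test, y_pred, average=None)
print(per_class_recall, np.mean(per_class_recall))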
print(f'Accuracy: {accuracy:.4f}')
recall=recall_score(y_test, y_pred,average='weighted')
print(f'Recall: {recall:.4f}')
# weighted: takes class imbalance into account by weighting each class's contribution by its support (the number of true instances of that class).
precision=precision_score(y_test, y_pred,average='weighted')
print(f'Precision: {precision:.4f}')
f1=f1_score(y_test, y_pred,average='weighted')
print(f'F1-Score: {f1:.4f}')
Accuracy: 0.9723
Recall: 0.9723
Precision: 0.9725
F1-Score: 0.9723
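Likewise, the weighted average can be reproduced by weighting each class's recall by its support (its count in y_test). A minimal sketch under the same assumptions:

import numpy as np
from sklearn.metrics import recall_score

# Weight per-class recall by class support (number of true samples per class);
# this should match recall_score(..., average='weighted')
classes, support = np.unique(y_test, return_counts=True)
per_class_recall = recall_score(y_test, y_pred, average=None, labels=classes)
print(np.average(per_class_recall, weights=support))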
print('Confusion Matrix:')
print(confusion_matrix(y_test, y_pred))
Confusion Matrix:
[[ 54   0   0]
 [  2 110   3]
 [  0   2  82]]
print('Classification Report:')
print(classification_report(y_test, y_pred))
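The per-class precision and recall in the report can also be read directly off the confusion matrix: each diagonal entry divided by its row sum gives that class's recall, and divided by its column sum gives its precision. A minimal sketch, assuming the same y_test and y_pred:

import numpy as np
from sklearn.metrics import confusion_matrix

cm = confusion_matrix(y_test, y_pred)
# Recall:    diagonal / row sums    (true instances per class)
# Precision: diagonal / column sums (predicted instances per class)
print('Per-class recall   :', np.diag(cm) / cm.sum(axis=1))
print('Per-class precision:', np.diag(cm) / cm.sum(axis=0))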