Deep Learning Assignments
202318035
DATASET 1: SUPERSTORE
In [ ]: import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score
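The cell that loads the dataset did not survive the export; a minimal sketch, assuming the Superstore CSV sits next to the notebook (the filename is an assumption):

In [ ]: # Load the Superstore dataset and preview the first rows
        # (filename is an assumption; some copies of this CSV need encoding='latin-1')
        superstore_data = pd.read_csv('Superstore.csv')
        superstore_data.head()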
Out[ ]:    Row ID        Order ID  Order Date   Ship Date       Ship Mode Customer ID    Customer Name    Segment        Country             City ...
        0       1  CA-2016-152156   11/8/2016  11/11/2016    Second Class    CG-12520      Claire Gute   Consumer  United States        Henderson ...
        1       2  CA-2016-152156   11/8/2016  11/11/2016    Second Class    CG-12520      Claire Gute   Consumer  United States        Henderson ...
        2       3  CA-2016-138688   6/12/2016   6/16/2016    Second Class    DV-13045  Darrin Van Huff  Corporate  United States      Los Angeles ...
        3       4  US-2015-108966  10/11/2015  10/18/2015  Standard Class    SO-20335   Sean O'Donnell   Consumer  United States  Fort Lauderdale ...
        4       5  US-2015-108966  10/11/2015  10/18/2015  Standard Class    SO-20335   Sean O'Donnell   Consumer  United States  Fort Lauderdale ...

        5 rows × 21 columns
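In [ ]: # Count missing values in each column
        superstore_data.isnull().sum()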
Row ID 0
Order ID 0
Order Date 0
Ship Date 0
Ship Mode 0
Customer ID 0
Customer Name 0
Segment 0
Country 0
City 0
State 0
Postal Code 0
Region 0
Product ID 0
Category 0
Sub-Category 0
Product Name 0
Sales 0
Quantity 0
Discount 0
Profit 0
dtype: int64
In [ ]: # Convert 'Order Date' and 'Ship Date' to datetime format, handling errors
        superstore_data['Order Date'] = pd.to_datetime(superstore_data['Order Date'], format='%Y-%m-%d', errors='coerce')
        superstore_data['Ship Date'] = pd.to_datetime(superstore_data['Ship Date'], format='%Y-%m-%d', errors='coerce')
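The summary below covers 8,058 rows even though the index still runs to 9,993, so roughly 1,900 rows were dropped after the conversion above. The cleaning cell is missing; a plausible sketch, assuming rows whose dates failed to parse (coerced to NaT) were removed:

In [ ]: # Assumed cleaning step: drop rows where date parsing produced NaT
        superstore_data = superstore_data.dropna(subset=['Order Date', 'Ship Date'])

In [ ]: superstore_data.describe()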
Out[ ]:             Row ID           Order Date            Ship Date   Postal Code         Sales   Quantity
          mean  4967.272648  2016-05-01 01:06:07  2016-05-04 23:59:49  55016.047779    223.480623   3.794738
          min      1.000000  2014-01-03 00:00:00  2014-01-07 00:00:00   1040.000000      0.990000   1.000000
          25%   2483.250000  2015-05-26 00:00:00  2015-05-29 00:00:00  21843.250000     17.940000   2.000000
          50%   4965.000000  2016-06-28 00:00:00  2016-07-02 00:00:00  53711.000000     50.965000   3.000000
          75%   7427.250000  2017-05-15 00:00:00  2017-05-19 18:00:00  90045.000000    191.976000   5.000000
          max   9994.000000  2017-12-30 00:00:00  2018-01-05 00:00:00  99301.000000  17499.950000  14.000000
In [ ]: superstore_data.info()
<class 'pandas.core.frame.DataFrame'>
Index: 8058 entries, 0 to 9993
Data columns (total 21 columns):
# Column Non-Null Count Dtype
--- ------ -------------- -----
0 Row ID 8058 non-null int64
1 Order ID 8058 non-null object
2 Order Date 8058 non-null datetime64[ns]
3 Ship Date 8058 non-null datetime64[ns]
4 Ship Mode 8058 non-null object
5 Customer ID 8058 non-null object
6 Customer Name 8058 non-null object
7 Segment 8058 non-null object
8 Country 8058 non-null object
9 City 8058 non-null object
10 State 8058 non-null object
11 Postal Code 8058 non-null int64
12 Region 8058 non-null object
13 Product ID 8058 non-null object
14 Category 8058 non-null object
15 Sub-Category 8058 non-null object
16 Product Name 8058 non-null object
17 Sales 8058 non-null float64
18 Quantity 8058 non-null int64
19 Discount 8058 non-null float64
20 Profit 8058 non-null float64
dtypes: datetime64[ns](2), float64(3), int64(3), object(13)
memory usage: 1.4+ MB
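Several cells between the summary above and the Keras model below did not survive the export: the feature matrix, the train/test split, the scaling step, and the definition of `neural_network`. A minimal sketch, assuming a few numeric columns predict Sales and a two-layer ReLU network (the features, target, and architecture are all assumptions; the `input_shape` style matches the Keras warning further down):

In [ ]: # Assumed feature preparation: numeric predictors, Sales as the regression target
        features = superstore_data[['Quantity', 'Discount', 'Profit', 'Postal Code']]
        target = superstore_data['Sales']
        X_train, X_test, y_train, y_test = train_test_split(
            features, target, test_size=0.2, random_state=42)
        # Standardize the features so they share a common scale
        scaler = StandardScaler()
        X_train = scaler.fit_transform(X_train)
        X_test = scaler.transform(X_test)

In [ ]: # Assumed architecture: two 64-unit ReLU hidden layers with a linear output
        from tensorflow.keras import Sequential
        from tensorflow.keras.layers import Dense
        neural_network = Sequential([
            Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
            Dense(64, activation='relu'),
            Dense(1)
        ])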
In [ ]: import tensorflow as tf
        # Compile the model with the Adam optimizer and mean squared error loss
        neural_network.compile(optimizer='adam', loss='mean_squared_error')
        # Train for 50 epochs; the validation split is inferred from the log below (162 batches)
        neural_network.fit(X_train, y_train, epochs=50, validation_split=0.2)
        # Predict on the test set, then calculate and print the Mean Squared Error and R-squared
        predicted_values_nn = neural_network.predict(X_test)
        print(f"Mean Squared Error: {mean_squared_error(y_test, predicted_values_nn)}")
        print(f"R-squared: {r2_score(y_test, predicted_values_nn)}")
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
Epoch 1/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 3s 4ms/step - loss: 47668.7617 - val_loss: 51688.6797
Epoch 2/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 64383.8750 - val_loss: 46871.4219
Epoch 3/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 33989.1836 - val_loss: 41950.8047
Epoch 4/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 40321.8438 - val_loss: 37491.6797
Epoch 5/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 35579.7109 - val_loss: 31717.7617
Epoch 6/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 29560.2715 - val_loss: 28373.3945
Epoch 7/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 34109.1094 - val_loss: 25534.1855
Epoch 8/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 51172.4766 - val_loss: 20007.9590
Epoch 9/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 35456.3477 - val_loss: 17684.2188
Epoch 10/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 27014.1738 - val_loss: 16791.4980
Epoch 11/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 25342.3340 - val_loss: 15307.5625
Epoch 12/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 19282.0391 - val_loss: 13753.0234
Epoch 13/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 32910.4805 - val_loss: 9830.8936
Epoch 14/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 28342.3164 - val_loss: 8664.4775
Epoch 15/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 21314.8379 - val_loss: 7692.1582
Epoch 16/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 29441.5254 - val_loss: 5153.7827
Epoch 17/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 20392.1309 - val_loss: 5785.3110
Epoch 18/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 30496.3496 - val_loss: 3993.5117
Epoch 19/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 27584.6875 - val_loss: 3594.0852
Epoch 20/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 20475.7793 - val_loss: 3755.9656
Epoch 21/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 61953.0586 - val_loss: 1671.9246
Epoch 22/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 26770.6309 - val_loss: 2593.4043
Epoch 23/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 10425.8662 - val_loss: 2277.5725
Epoch 24/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 19270.7734 - val_loss: 1250.8818
Epoch 25/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 14116.5947 - val_loss: 1505.1302
Epoch 26/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 14105.9600 - val_loss: 1167.1968
Epoch 27/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 25164.5859 - val_loss: 2049.8232
Epoch 28/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 26295.7676 - val_loss: 2879.0378
Epoch 29/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 28493.4883 - val_loss: 1048.2556
Epoch 30/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 17539.0742 - val_loss: 4008.9102
Epoch 31/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step - loss: 8479.7100 - val_loss: 1161.5667
Epoch 32/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 15669.1748 - val_loss: 1503.3073
Epoch 33/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 28940.2637 - val_loss: 1834.2437
Epoch 34/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 13616.1494 - val_loss: 1704.3368
Epoch 35/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 5860.9961 - val_loss: 2200.2449
Epoch 36/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 5ms/step - loss: 16617.9785 - val_loss: 4047.9802
Epoch 37/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 32050.4023 - val_loss: 2532.6729
Epoch 38/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 16179.7148 - val_loss: 4513.4199
Epoch 39/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 21568.2441 - val_loss: 5153.5933
Epoch 40/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 24783.0625 - val_loss: 5456.6978
Epoch 41/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 15170.2783 - val_loss: 1733.7618
Epoch 42/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 32876.7031 - val_loss: 1131.5521
Epoch 43/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 14524.8955 - val_loss: 1701.3094
Epoch 44/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 0s 2ms/step - loss: 9640.7227 - val_loss: 4312.9932
Epoch 45/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 15651.7090 - val_loss: 2375.5364
Epoch 46/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 14777.8525 - val_loss: 3953.7329
Epoch 47/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 4ms/step - loss: 5551.8149 - val_loss: 4933.4780
Epoch 48/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 6877.3472 - val_loss: 2246.7551
Epoch 49/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 14761.7148 - val_loss: 8128.3403
Epoch 50/50
162/162 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 13291.8027 - val_loss: 6345.6904
51/51 ━━━━━━━━━━━━━━━━━━━━ 0s 3ms/step
Mean Squared Error: 6065.888140869391
R-squared: 0.7867673897551717
In [ ]: import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
In [ ]: import torch
from torch.utils.data import DataLoader, TensorDataset
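The cell converting the scaled arrays to tensors is missing; the class below references `X_train_tensor`. A minimal sketch (the target and test tensors are assumptions following the same pattern):

In [ ]: # Assumed tensor conversion for the scaled train/test splits
        X_train_tensor = torch.tensor(X_train, dtype=torch.float32)
        y_train_tensor = torch.tensor(y_train.values, dtype=torch.float32).view(-1, 1)
        X_test_tensor = torch.tensor(X_test, dtype=torch.float32)
        y_test_tensor = torch.tensor(y_test.values, dtype=torch.float32).view(-1, 1)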
In [ ]: import torch
        import torch.nn as nn

        class FeedForwardNN(nn.Module):
            def __init__(self):
                super(FeedForwardNN, self).__init__()
                # Define the network layers
                self.fc1 = nn.Linear(X_train_tensor.shape[1], 64)
                self.bn1 = nn.BatchNorm1d(64)
                self.fc2 = nn.Linear(64, 64)
                self.bn2 = nn.BatchNorm1d(64)
                self.fc_out = nn.Linear(64, 1)

            def forward(self, x):
                # Forward pass (reconstructed; the ReLU activations are an assumption)
                x = torch.relu(self.bn1(self.fc1(x)))
                x = torch.relu(self.bn2(self.fc2(x)))
                return self.fc_out(x)
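Of the training loop this class feeds into, only the `total_loss += loss.item()` accumulator line survived the export. A minimal sketch around it, assuming MSE loss, the Adam optimizer, and the tensors defined above (batch size, learning rate, and epoch count are assumptions):

In [ ]: # Reconstructed training loop (loss, optimizer, batch size, and epochs are assumptions)
        model = FeedForwardNN()
        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=0.001)
        train_loader = DataLoader(TensorDataset(X_train_tensor, y_train_tensor),
                                  batch_size=32, shuffle=True)

        for epoch in range(50):
            model.train()
            total_loss = 0.0
            for X_batch, y_batch in train_loader:
                optimizer.zero_grad()
                loss = criterion(model(X_batch), y_batch)
                loss.backward()
                optimizer.step()
                total_loss += loss.item()
            print(f"Epoch {epoch + 1}/50, avg loss: {total_loss / len(train_loader):.4f}")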