-
Notifications
You must be signed in to change notification settings - Fork 1
Expand file tree
/
Copy pathpreparing_for_model.py
More file actions
90 lines (60 loc) · 2.57 KB
/
preparing_for_model.py
File metadata and controls
90 lines (60 loc) · 2.57 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
# -*- coding: utf-8 -*-
"""Preparing for Model.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/18zKZ50n37YxPrP-SWokx14AGklLCynXu
"""
import pandas as pd
import numpy as np
from google.colab import files
# Colab-only: open the browser upload widget so the user can push the CSV
# into the runtime's working directory; `uploaded` maps filename -> bytes.
uploaded=files.upload()
# NOTE(review): filename is hard-coded to one specific upload
# ('stratified_train_set (6).csv') — confirm it matches what was uploaded.
df = pd.read_csv('stratified_train_set (6).csv')
print(df)
# Report the missing-value count for every column, then a DataFrame-wide total.
# Compute the per-column NaN counts once and reuse the Series for the total.
na_counts = df.isna().sum()
for column, n_missing in na_counts.items():
    if n_missing > 0:
        print(f"Column '{column}' has {n_missing} missing values.")
    else:
        print(f"Column '{column}' has no missing values.")

overall = na_counts.sum()
if overall > 0:
    print(f"\nThe DataFrame has a total of {overall} missing values.")
else:
    print("\nThe DataFrame has no missing values.")
"""# **Handling text attributes**"""
from sklearn.preprocessing import LabelEncoder
label_encoder = LabelEncoder()
# List of categorical columns to encode
categorical_columns = [ 'position', 'team', 'report_primary_injury', 'weather']
# Apply LabelEncoder to each categorical column
for column in categorical_columns:
df[column] = label_encoder.fit_transform(df[column])
# Display the updated DataFrame with encoded categorical columns
print(df)
"""# **One Hot Encoding**"""
import pandas as pd
from sklearn.preprocessing import OneHotEncoder
# Now, extract the columns that need one-hot encoding
columns_to_onehot_encode = [ 'season', 'age' , 'week', 'team', 'position', 'report_primary_injury', 'weather']
# Initialize OneHotEncoder
onehot_encoder = OneHotEncoder(sparse=False, drop='first')
# Fit and transform the data using OneHotEncoder
encoded_data = onehot_encoder.fit_transform(df[columns_to_onehot_encode])
# Create a DataFrame with the encoded data
encoded_df = pd.DataFrame(encoded_data, columns=onehot_encoder.get_feature_names_out(columns_to_onehot_encode))
# Drop the original columns from the original DataFrame and concatenate the encoded DataFrame
df = pd.concat([df.drop(columns=columns_to_onehot_encode), encoded_df], axis=1)
# Display the updated DataFrame
print(df)
"""# **Feature scaling**"""
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
minmax_scaler = MinMaxScaler()
# Fit and transform the entire DataFrame
scaled_data = minmax_scaler.fit_transform(df)
# Create a DataFrame with the scaled data
scaled_df = pd.DataFrame(scaled_data, columns=df.columns)
# Display the updated DataFrame
print(scaled_df.head())
# Assuming df is your preprocessed DataFrame
scaled_df.to_csv('scaled_train_set.csv', index=False)