
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
%matplotlib inline
train_data = pd.read_csv('train.csv')
test_data = pd.read_csv('test.csv')
train_data.shape
test_data.shape
testID = pd.DataFrame({'Id': test_data.Id})
trainID = pd.DataFrame({'Id': train_data.Id})
full_data = pd.concat([train_data.copy().drop('SalePrice',axis=1),test_data])
train_data.head()
categorical_cols = [cname for cname in train_data.columns
                    if train_data[cname].dtype == 'object']
len(categorical_cols)
categorical_cols
# helper functions for categorical plots: x is the column name,
# (a, b) is the figure size and c is the x-tick label rotation
def bplot(x, a, b, c):
    fig = plt.figure(figsize=(a, b))
    ax = sns.barplot(x=x, y='SalePrice', data=train_data)
    plt.xticks(rotation=c)
    return ax

def bxplot(x, a, b, c):
    fig = plt.figure(figsize=(a, b))
    ax = sns.boxplot(x=x, y='SalePrice', data=train_data)
    plt.xticks(rotation=c)
    return ax

def vplot(x, a, b, c):
    fig = plt.figure(figsize=(a, b))
    ax = sns.violinplot(x=x, y='SalePrice', data=train_data)
    plt.xticks(rotation=c)
    return ax

def cplot(x, a, b, c):
    fig = plt.figure(figsize=(a, b))
    ax = sns.countplot(x=x, data=train_data)
    plt.xticks(rotation=c)
    return ax
bplot('SaleType',10,4,0)
bxplot('Neighborhood',12,4,40)
vplot('HouseStyle',8,4,0)
cplot('GarageType',10,4,0)
numerical_cols = [cname for cname in train_data.columns
                  if train_data[cname].dtype in ['int64', 'float64']]
numerical_cols.remove('SalePrice')
numerical_cols.remove('Id')
len(numerical_cols)
correlation = train_data[numerical_cols].corr()
fig = plt.figure(figsize=(12,12))
sns.heatmap(correlation,cmap='magma')
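# a quick optional follow-up to the heatmap: rank the numerical features by their
# Pearson correlation with SalePrice (linear association only)
corr_with_target = train_data[numerical_cols + ['SalePrice']].corr()['SalePrice']
corr_with_target.drop('SalePrice').sort_values(ascending=False).head(10)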
f = plt.figure(figsize=(12, 20))
for i in range(len(numerical_cols)):
    f.add_subplot(9, 4, i + 1)
    sns.scatterplot(x=train_data[numerical_cols].iloc[:, i],
                    y=train_data.SalePrice, color='purple')
plt.tight_layout()
# distribution of the overall house prices
# (sns.distplot is deprecated, so histplot with kde=True is used here)
sns.histplot(train_data['SalePrice'], kde=True, color='purple')
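# optional: sale prices are typically right-skewed; skew() puts a number on it
train_data['SalePrice'].skew()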
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.impute import SimpleImputer
from sklearn.preprocessing import OneHotEncoder
X = train_data.drop(['Id','SalePrice'],axis=1)
y = train_data.SalePrice
from sklearn.model_selection import train_test_split
X_train, X_valid, y_train, y_valid = train_test_split(X, y, test_size=0.3, random_state=101)
# select categorical columns
categorical_cols = [cname for cname in X_train.columns
                    if X_train[cname].dtype == 'object']
# select numerical columns
numerical_cols = [cname for cname in X_train.columns
                  if X_train[cname].dtype in ['int64', 'float64']]
# keep selected columns only
my_cols = categorical_cols + numerical_cols
X_train = X_train[my_cols].copy()
X_valid = X_valid[my_cols].copy()
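# optional sanity check before imputing: which training columns contain missing values
X_train.isnull().sum().sort_values(ascending=False).head(15)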
# impute missing numerical values with the column mean
numerical_transformer = SimpleImputer(strategy='mean')
# impute missing categorical values with the most frequent value, then one-hot encode
categorical_transformer = Pipeline(steps=[
    ('imputer', SimpleImputer(strategy='most_frequent')),
    ('onehot', OneHotEncoder(handle_unknown='ignore'))
])
# Bundle preprocessing for numerical and categorical data
preprocessor = ColumnTransformer(transformers=[
    ('num', numerical_transformer, numerical_cols),
    ('cat', categorical_transformer, categorical_cols)
])
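# optional: fitting the preprocessor on its own shows how many features remain
# after imputation and one-hot encoding (the exact count depends on the split)
preprocessor.fit_transform(X_train).shape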
from sklearn.ensemble import RandomForestRegressor
from xgboost import XGBRegressor
from sklearn.metrics import mean_absolute_error
import warnings
warnings.filterwarnings('ignore')
pd.options.display.max_columns = None
# define the model
model1 = RandomForestRegressor(n_estimators=1000,random_state=0)
# bundle preprocessing and model in a pipeline
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('model', model1)])
# fit the model
clf.fit(X_train,y_train)
# get predictions
preds = clf.predict(X_valid)
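# hold-out MAE for the random forest pipeline (mean_absolute_error was imported above)
print(mean_absolute_error(y_valid, preds))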
# cross validation
from sklearn.model_selection import cross_val_score
scores = -1 * cross_val_score(clf,X,y,cv=5,scoring = 'neg_mean_absolute_error')
print(scores.mean())
# define the model
model2 = XGBRegressor(
    learning_rate=0.1,
    n_estimators=1000,
    max_depth=5,
    min_child_weight=1,
    gamma=0,
    subsample=0.8,
    colsample_bytree=0.8,
    n_jobs=4,            # nthread is a deprecated alias for n_jobs
    scale_pos_weight=1,
    random_state=27)     # seed is a deprecated alias for random_state
# bundle preprocessing and model in a pipeline
clf = Pipeline(steps=[('preprocessor', preprocessor),
                      ('model', model2)])
# fit the model
clf.fit(X_train,y_train)
# get predictions
preds = clf.predict(X_valid)
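# same hold-out MAE check for the XGBoost pipeline, to compare against the forest
print(mean_absolute_error(y_valid, preds))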
# cross validation
from sklearn.model_selection import cross_val_score
scores = -1 * cross_val_score(clf,X,y,cv=10,scoring = 'neg_mean_absolute_error')
print(scores.mean())
test_data = test_data.drop('Id',axis=1)
test_pred = clf.predict(test_data)
test_pred
sp = pd.DataFrame(test_pred,columns=['SalePrice'])
output = pd.concat([testID,sp],axis=1)
output.to_csv('submission.csv', index=False)
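# optional: read the file back to confirm the expected Id / SalePrice format
pd.read_csv('submission.csv').head()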