# %pip install awswrangler
%pip install pycaret[full]
%pip install xgboost
# %pip install mlflow
# import awswrangler as wr
import pandas as pd
import numpy as np
# import boto3
# from sagemaker import get_execution_role
import datetime
import string
import random
from pycaret.classification import *
# data = pd.read_csv('data/data_feature_eng.csv')
data = pd.read_csv('data/feature_ori.csv')
# print(data)
m_setup = setup(data=data, target='label', normalize=False,
                feature_interaction=False,
                feature_ratio=False,
                trigonometry_features=False,
                use_gpu=True)
| | Description | Value |
|---|---|---|
| 0 | session_id | 6597 |
| 1 | Target | label |
| 2 | Target Type | Binary |
| 3 | Label Encoded | 0.0: 0, 1.0: 1 |
| 4 | Original Data | (64800, 11) |
| 5 | Missing Values | 0 |
| 6 | Numeric Features | 8 |
| 7 | Categorical Features | 2 |
| 8 | Ordinal Features | 0 |
| 9 | High Cardinality Features | 0 |
| 10 | High Cardinality Method | None |
| 11 | Transformed Train Set | (45359, 19) |
| 12 | Transformed Test Set | (19441, 19) |
| 13 | Shuffle Train-Test | True |
| 14 | Stratify Train-Test | False |
| 15 | Fold Generator | StratifiedKFold |
| 16 | Fold Number | 10 |
| 17 | CPU Jobs | -1 |
| 18 | Use GPU | 1 |
| 19 | Log Experiment | 0 |
| 20 | Experiment Name | clf-default-name |
| 21 | USI | 717f |
| 22 | Imputation Type | simple |
| 23 | Iterative Imputation Iteration | None |
| 24 | Numeric Imputer | mean |
| 25 | Iterative Imputation Numeric Model | None |
| 26 | Categorical Imputer | constant |
| 27 | Iterative Imputation Categorical Model | None |
| 28 | Unknown Categoricals Handling | least_frequent |
| 29 | Normalize | 0 |
| 30 | Normalize Method | None |
| 31 | Transformation | 0 |
| 32 | Transformation Method | None |
| 33 | PCA | 0 |
| 34 | PCA Method | None |
| 35 | PCA Components | None |
| 36 | Ignore Low Variance | 0 |
| 37 | Combine Rare Levels | 0 |
| 38 | Rare Level Threshold | None |
| 39 | Numeric Binning | 0 |
| 40 | Remove Outliers | 0 |
| 41 | Outliers Threshold | None |
| 42 | Remove Multicollinearity | 0 |
| 43 | Multicollinearity Threshold | None |
| 44 | Remove Perfect Collinearity | 1 |
| 45 | Clustering | 0 |
| 46 | Clustering Iteration | None |
| 47 | Polynomial Features | 0 |
| 48 | Polynomial Degree | None |
| 49 | Trignometry Features | 0 |
| 50 | Polynomial Threshold | None |
| 51 | Group Features | 0 |
| 52 | Feature Selection | 0 |
| 53 | Feature Selection Method | classic |
| 54 | Features Selection Threshold | None |
| 55 | Feature Interaction | 0 |
| 56 | Feature Ratio | 0 |
| 57 | Interaction Threshold | None |
| 58 | Fix Imbalance | 0 |
| 59 | Fix Imbalance Method | SMOTE |
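# The summary above shows the 11 original columns expanding to 19 after encoding
# (compare rows 4 and 11). A minimal sketch for inspecting the transformed
# matrices directly, assuming the PyCaret 2.x get_config API:
from pycaret.classification import get_config

X_train = get_config('X_train')  # transformed training features, (45359, 19) per the summary
y_train = get_config('y_train')  # encoded target
print(X_train.shape)
print(y_train.value_counts())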
# max_depth = maximum depth of each tree
# max_leaves = maximum number of leaves per tree
# subsample = row sampling
# colsample_bytree = column sampling; the fraction of columns used in each iteration
# learning_rate = learning rate
# In general, column sampling affects model performance and training time more
# than row sampling (see the sampling sketch after the CV table below)
# xgboost = create_model('xgboost', max_depth=16, max_leaves=255)
# xgboost = create_model('xgboost')
xgboost = create_model('xgboost', max_depth=8, max_leaves=256)
# best_model = compare_models(n_select=6)
| Fold | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
|---|---|---|---|---|---|---|---|
| 0 | 0.9810 | 0.9957 | 0.9388 | 0.9360 | 0.9374 | 0.9262 | 0.9262 |
| 1 | 0.9861 | 0.9980 | 0.9563 | 0.9522 | 0.9542 | 0.9461 | 0.9461 |
| 2 | 0.9852 | 0.9967 | 0.9563 | 0.9467 | 0.9515 | 0.9428 | 0.9428 |
| 3 | 0.9806 | 0.9954 | 0.9432 | 0.9297 | 0.9364 | 0.9250 | 0.9250 |
| 4 | 0.9850 | 0.9977 | 0.9607 | 0.9415 | 0.9510 | 0.9422 | 0.9422 |
| 5 | 0.9843 | 0.9961 | 0.9578 | 0.9400 | 0.9488 | 0.9396 | 0.9396 |
| 6 | 0.9846 | 0.9965 | 0.9534 | 0.9452 | 0.9493 | 0.9402 | 0.9402 |
| 7 | 0.9885 | 0.9975 | 0.9665 | 0.9582 | 0.9623 | 0.9556 | 0.9556 |
| 8 | 0.9824 | 0.9959 | 0.9418 | 0.9418 | 0.9418 | 0.9314 | 0.9314 |
| 9 | 0.9830 | 0.9967 | 0.9519 | 0.9369 | 0.9443 | 0.9343 | 0.9343 |
| Mean | 0.9841 | 0.9966 | 0.9527 | 0.9428 | 0.9477 | 0.9383 | 0.9383 |
| SD | 0.0023 | 0.0008 | 0.0084 | 0.0078 | 0.0075 | 0.0089 | 0.0089 |
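# As noted above, subsample does row sampling and colsample_bytree does column
# sampling. create_model forwards extra keyword arguments to the underlying
# XGBClassifier, so both can be set directly; a sketch with illustrative
# (untuned) values:
xgb_row = create_model('xgboost', max_depth=8, max_leaves=256,
                       subsample=0.8)          # each tree sees 80% of the rows
xgb_col = create_model('xgboost', max_depth=8, max_leaves=256,
                       colsample_bytree=0.6)   # each tree sees 60% of the columns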
# A tree of depth n has at most 2**n leaves, so max_leaves only binds when it
# is below 2**max_depth
params = {'max_depth': [128, 64, 32],
          'max_leaves': [256, 1024, 4096],
          'colsample_bytree': [0.2, 0.4, 0.6, 0.8, 1.0],
          'learning_rate': [0.05, 0.005]
          }
# tuned_xgboost = tune_model(xgboost, optimize='F1', custom_grid=params)
tuned_xgboost = tune_model(xgboost, optimize='Accuracy', custom_grid=params, tuner_verbose=3)
# tuned_xgboost = tune_model(xgboost, optimize='Kappa')
| Fold | Accuracy | AUC | Recall | Prec. | F1 | Kappa | MCC |
|---|---|---|---|---|---|---|---|
| 0 | 0.9784 | 0.9949 | 0.9271 | 0.9298 | 0.9285 | 0.9157 | 0.9157 |
| 1 | 0.9837 | 0.9974 | 0.9374 | 0.9541 | 0.9457 | 0.9361 | 0.9361 |
| 2 | 0.9797 | 0.9959 | 0.9287 | 0.9369 | 0.9327 | 0.9208 | 0.9208 |
| 3 | 0.9740 | 0.9930 | 0.9112 | 0.9165 | 0.9139 | 0.8985 | 0.8986 |
| 4 | 0.9821 | 0.9966 | 0.9447 | 0.9379 | 0.9413 | 0.9307 | 0.9307 |
| 5 | 0.9793 | 0.9950 | 0.9374 | 0.9266 | 0.9320 | 0.9198 | 0.9198 |
| 6 | 0.9846 | 0.9963 | 0.9389 | 0.9584 | 0.9485 | 0.9395 | 0.9395 |
| 7 | 0.9872 | 0.9976 | 0.9520 | 0.9632 | 0.9575 | 0.9500 | 0.9500 |
| 8 | 0.9775 | 0.9946 | 0.9127 | 0.9372 | 0.9248 | 0.9116 | 0.9117 |
| 9 | 0.9791 | 0.9952 | 0.9344 | 0.9276 | 0.9310 | 0.9187 | 0.9187 |
| Mean | 0.9806 | 0.9956 | 0.9324 | 0.9388 | 0.9356 | 0.9241 | 0.9242 |
| SD | 0.0037 | 0.0013 | 0.0123 | 0.0144 | 0.0121 | 0.0143 | 0.0143 |
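# The grid above has 3 * 3 * 5 * 2 = 90 combinations, but tune_model samples it
# with a randomized search of only 10 iterations by default. A sketch for wider
# coverage, assuming the PyCaret 2.x tune_model parameters:
tuned_xgboost_wide = tune_model(xgboost, optimize='Accuracy', custom_grid=params,
                                n_iter=50,          # sample 50 of the 90 combinations
                                choose_better=True) # keep the untuned model if tuning does not beat it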
tuned_xgboost
XGBClassifier(base_score=0.5, booster='gbtree', colsample_bylevel=1,
              colsample_bynode=1, colsample_bytree=0.4,
              enable_categorical=False, gamma=0, gpu_id=0,
              importance_type=None, interaction_constraints='',
              learning_rate=0.05, max_delta_step=0, max_depth=64,
              max_leaves=4096, min_child_weight=1, missing=nan,
              monotone_constraints='()', n_estimators=100, n_jobs=-1,
              num_parallel_tree=1, objective='binary:logistic',
              predictor='auto', random_state=6597, reg_alpha=0, reg_lambda=1,
              scale_pos_weight=1, subsample=1, tree_method='gpu_hist',
              use_label_encoder=True, validate_parameters=1, ...)
# plot_model(tuned_dt, plot='auc')
evaluate_model(tuned_xgboost)
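# evaluate_model opens an interactive widget; individual plots can also be
# rendered directly, as in the commented-out AUC call above. Standard PyCaret
# plot names:
plot_model(tuned_xgboost, plot='auc')               # ROC curve
plot_model(tuned_xgboost, plot='confusion_matrix')  # confusion matrix on the hold-out set
plot_model(tuned_xgboost, plot='feature')           # feature importance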
mdl = finalize_model(tuned_xgboost)
save_model(mdl, 'tuned_xgboost_orifeature_0323')
# save_model(tuned_xgboost, 'tuned_xgboost_0207')
Transformation Pipeline and Model Successfully Saved
(Pipeline(memory=None,
          steps=[('dtypes',
                  DataTypes_Auto_infer(categorical_features=[], display_types=True,
                                       features_todrop=[], id_columns=[],
                                       ml_usecase='classification', numerical_features=[],
                                       target='label', time_features=[])),
                 ('imputer',
                  Simple_Imputer(categorical_strategy='not_available',
                                 fill_value_categorical=None,
                                 fill_value_numerical=None, numeric_strate...
                  max_delta_step=0, max_depth=64, max_leaves=4096, min_child_weight=1,
                  missing=nan, monotone_constraints='()', n_estimators=100, n_jobs=-1,
                  num_parallel_tree=1, objective='binary:logistic', predictor='auto',
                  random_state=6597, reg_alpha=0, reg_lambda=1, scale_pos_weight=1,
                  subsample=1, tree_method='gpu_hist', use_label_encoder=True,
                  validate_parameters=1, ...)]],
          verbose=False),
 'tuned_xgboost_orifeature_0323.pkl')
# Reload the saved pipeline and score the most recent 10,000 rows
load_mdl = load_model('tuned_xgboost_orifeature_0323')
prediction = predict_model(load_mdl, data=data.iloc[-10000:])
# predict_model appends the predicted class as a 'Label' column; cast it to
# float64 so it can be compared against the original float-encoded target
prediction = prediction.astype({'Label': 'float64'})
prediction.info()
from pycaret.utils import check_metric
# check_metric expects (actual, predicted): compare the true 'label' column
# against the predicted 'Label' column
check_metric(prediction['label'], prediction['Label'], metric='F1')
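# check_metric reports one metric at a time; a small sketch to report several
# label-based metrics for the reloaded model (metric names per PyCaret 2.x):
for metric in ['Accuracy', 'Recall', 'Precision', 'F1', 'Kappa']:
    print(metric, check_metric(prediction['label'], prediction['Label'], metric=metric))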