import json
import urllib.parse
import boto3
import pandas as pd
import pymysql
import numpy as np
import awswrangler as wr

rds_host_write = "database-ambt.cluster-cvnwxgmngsms.ap-northeast-2.rds.amazonaws.com"
rds_host_read = "database-ambt.cluster-ro-cvnwxgmngsms.ap-northeast-2.rds.amazonaws.com"
name = "admin"
password = "hdci12#$"
db_name = "ambt"

PORT = 3306
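
# NOTE: credentials are hardcoded here as in the original deployment; in practice
# they would usually be injected via Lambda environment variables or AWS Secrets
# Manager, e.g. (hypothetical variable names):
#   import os
#   name = os.environ.get('DB_USER', name)
#   password = os.environ.get('DB_PASSWORD', password)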

def lambda_handler(event, context):
    
    # The read-only endpoint is sufficient here: the handler only SELECTs lookup tables.
    conn = pymysql.connect(host=rds_host_read, user=name, passwd=password, port=PORT,
                           database=db_name, charset='utf8', connect_timeout=5)
   
    sql_statement = "SELECT * FROM `ambt.icos`.facility_type;"
    facility_type_id = pd.read_sql(sql=sql_statement, con=conn)
    
    sql_statement = "SELECT * FROM `ambt.icos`.facility_code;"
    material_code = pd.read_sql(sql=sql_statement, con=conn)
    
    sql_statement = "SELECT * FROM `ambt.icos`.control_value;"
    control_value = pd.read_sql(sql=sql_statement, con=conn)
    
    sql_statement = "SELECT * FROM `ambt.icos`.value_type;"
    value_type = pd.read_sql(sql=sql_statement, con=conn)
    
    sql_statement = "SELECT * FROM `ambt.icos`.ambt_anoicos_code;"
    mappingTable = pd.read_sql(sql=sql_statement, con=conn)
    print(mappingTable.head())
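
    # The lookup tables are only read once, so the connection can be released
    # before the per-record processing starts (assumption: nothing below reuses it).
    conn.close()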
    
    s3 = boto3.client('s3')
    for record in event['Records']:
        #print("Received event: " + json.dumps(event, indent=2))
        bucket = record['s3']['bucket']['name']
        key = urllib.parse.unquote_plus(record['s3']['object']['key'])

        print('bucket : ', bucket)
        print('key : ', key)

        obj = s3.get_object(Bucket=bucket, Key=key)
        raw_data = pd.read_csv(obj['Body'], sep=',')
        raw_data = raw_data.drop(columns=['cnt'])
        # raw_data = raw_data.drop(columns=['Unnamed: 0'])
        raw_data = raw_data.dropna(axis=0)      # drop rows containing nulls
        raw_data = raw_data.reset_index(drop=True)
        print('raw', raw_data.columns)
        print(raw_data[130:140])
        print('length of raw data', len(raw_data))
        
        
        # --------------- Data pre-processing (mask values outside each point's valid range)
        # temperature, humidity (and their set points): -15 ~ 100
        # operating status: 0/1
        # open ratio (and set point): 0 ~ 100
        # operating mode: 0 ~ 10
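        # Each block below applies the same pattern: select the raw tags for a
        # control_value group from mappingTable, replace out-of-range readings
        # with NaN, and accumulate the per-column NaN count in null_sum.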
        
        processed_data = raw_data.copy()
        null_sum = pd.DataFrame(np.zeros((1, len(raw_data.columns))), columns=raw_data.columns)

        # control_value = 3 (temperature), 4 (humidity), 5 (temperature set point), 6 (humidity set point)
        target_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                      & (mappingTable['control_value'].isin([3, 4, 5, 6]))]
        for tag in set(target_row['raw_tag']):
            processed_data[tag] = [r if -15 < r <= 100 else np.nan for r in raw_data[tag]]
            null_sum.at[0, tag] = processed_data[tag].isnull().sum()
    
        # control_value = 1 (operating status)
        target_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                      & (mappingTable['control_value'] == 1)]
        for tag in set(target_row['raw_tag']):
            processed_data[tag] = [r if r == 0 or r == 1 else np.nan for r in raw_data[tag]]
            null_sum.at[0, tag] = processed_data[tag].isnull().sum()
    
        # control_value = 7 (open ratio), 8 (open ratio set point)
        target_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                      & (mappingTable['control_value'].isin([7, 8]))]
        for tag in set(target_row['raw_tag']):
            # valid open-ratio range is 0 ~ 100
            processed_data[tag] = [r if 0 <= r <= 100 else np.nan for r in raw_data[tag]]
            null_sum.at[0, tag] = processed_data[tag].isnull().sum()
    
        # control_value = 30 (operating mode)
        target_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                      & (mappingTable['control_value'] == 30)]
        for tag in set(target_row['raw_tag']):
            processed_data[tag] = [r if 0 <= r <= 10 else np.nan for r in raw_data[tag]]
            null_sum.at[0, tag] = processed_data[tag].isnull().sum()
        print('sum of null points:', null_sum.sum())

        # --------------- Round off the time index and find missing rows
        time_resolution = 5        # minutes
        round_m_comp = 0           # expected minutes-of-day, advanced one step per row
        missing_idx = []
        missing_date = []
        print('len(processed_data)', len(processed_data))
        for time_idx in range(len(processed_data)):
            str_date = processed_data.at[time_idx, 'time']
            date, clock = str_date.split()
            h = clock.split(':')[0]
            # floor the minutes to the nearest multiple of time_resolution
            round_m = int(int(clock.split(':')[1]) / time_resolution) * time_resolution
            m, s = str(round_m).zfill(2), '00'

            round_m_comp += time_resolution
            # ----- find the missing rows ----- #
            # The counter starts at 00:00, so whenever it lags behind the actual
            # time of the current row, one timestamp per step is recorded as missing.
            while round_m_comp <= int(h) * 60 + round_m:
                round_m_comp_tmp = round_m_comp - time_resolution
                h_tmp = str(round_m_comp_tmp // 60).zfill(2)
                m_tmp = str(round_m_comp_tmp % 60).zfill(2)
                missing_date.append(date + ' ' + h_tmp + ':' + m_tmp + ':00')
                missing_idx.append(time_idx + len(missing_idx))   # offset by rows already marked missing
                round_m_comp += time_resolution
            # ----- find the missing rows ----- #
            # round off the time index of the existing row
            processed_data.at[time_idx, 'time'] = date + ' ' + h + ':' + m + ':' + s
        print('len', len(processed_data))
        print('number of missing rows:', len(missing_idx))
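
        # Note: an equivalent approach (not used here, to keep the original logic)
        # would let pandas do the rounding and gap insertion, roughly:
        #   t = pd.to_datetime(processed_data['time']).dt.floor('5min')
        #   full = pd.date_range(t.iloc[0], t.iloc[-1], freq='5min')
        #   processed_data = processed_data.set_index(t).reindex(full)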
        
        # ----- insert NaN rows at the missing timestamps ----- #
        for idx, miss_idx in enumerate(missing_idx):
            nan_row = pd.DataFrame(np.full((1, len(raw_data.columns)), np.nan),
                                   columns=raw_data.columns)
            temp1 = processed_data[processed_data.index < miss_idx].copy()
            temp2 = processed_data[processed_data.index >= miss_idx].copy()
            # DataFrame.append was removed in pandas 2.0; pd.concat is the replacement
            processed_data = pd.concat([temp1, nan_row, temp2], ignore_index=True)
            processed_data.at[miss_idx, 'time'] = missing_date[idx]
        # ----- insert NaN rows at the missing timestamps ----- #
        
        # ----- forward-fill remaining NaNs ----- #
        # control_value = 3 (temperature), 4 (humidity)
        TemHum_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                      & (mappingTable['control_value'].isin([3, 4]))]
        temp1 = processed_data[list(set(TemHum_row['raw_tag']))].ffill()

        # control_value = 1 (operating status on/off), 7 (open ratio), 8 (open ratio set point),
        # 19 (differential pressure), 30 (operating mode)
        StatusOpenRatio_row = mappingTable.loc[(mappingTable['site_id'] == 1) & (mappingTable['collect_status'] == 1)
                                               & (mappingTable['control_value'].isin([1, 7, 8, 19, 30]))]
        temp2 = processed_data[list(set(StatusOpenRatio_row['raw_tag']))].ffill()

        # all remaining columns, forward-filled the same way
        others_tag = [x for x in raw_data.columns
                      if x not in set(TemHum_row['raw_tag'])
                      and x not in set(StatusOpenRatio_row['raw_tag'])]
        others = processed_data[others_tag].ffill()

        # note: this reorders the columns to (others, temperature/humidity, status/open-ratio)
        processed_data = pd.concat([others, temp1, temp2], axis=1)
        processed_data = processed_data.round(1)
        # ----- forward-fill remaining NaNs ----- #
        print('processed', processed_data)
        
        # ----- load to S3 prep bucket ----- #
        # Strip the file name from the key, keeping only its prefix.
        curated_file_key = key.rsplit('/', 1)[0] + '/' if '/' in key else ''
        target_path = 's3://hdci-ambt-anoicos-prep/{}'.format(curated_file_key)
        print('target_path', target_path)
        wr.s3.to_csv(
            df=processed_data,
            path=target_path,
            mode='overwrite',
            dataset=True
        )
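
        # With dataset=True, awswrangler writes CSV part file(s) with generated
        # names under target_path, and mode='overwrite' first removes whatever
        # already exists under that prefix.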

        
        # ----- load to S3 prep bucket ----- #