```python
import ast
import logging
import os
from urllib.parse import unquote_plus

import pandas as pd
import s3fs

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
# Name the filesystem handle `s3` so it does not shadow the s3fs module itself.
s3 = s3fs.S3FileSystem()

def lambda_handler(event, context):
    region, bucket_name, object_key = parse_event(event)
    # Example values from one Firehose delivery:
    # bucket_name = 'homenetwork-data'
    # object_key = '2021/03/30/02/homenetwork-data-firehose-1-2021-03-30-02-28-48-5de99bd1-d81a-4abb-ab0f-7aa47bab1553'
    s3_path = "{}/{}".format(bucket_name, object_key)
    logger.info("start etl for file : %s", s3_path)
    item_lst = get_s3_file(s3_path)
    csv, target_date = parse_raw_to_csv(item_lst)
    save_path = get_save_path(target_date, object_key)
    try:
        with s3.open(save_path, 'w', encoding='utf-8') as f:
            f.write(csv)
        logger.info("save success as file name : %s", save_path)
    except Exception:
        logger.exception("Cannot write csv to s3 : %s", save_path)
        raise  # re-raise so the invocation is marked failed instead of logging "success"

def parse_event(event):
    record = event["Records"][0]
    region = record["awsRegion"]
    s3_record = record["s3"]
    bucket_name = s3_record["bucket"]["name"]
    # S3 event notifications deliver the key URL-encoded (e.g. spaces as '+'),
    # so decode it before using it as a path.
    object_key = unquote_plus(s3_record["object"]["key"])
    logger.info("region: %s", region)
    logger.info("bucket_name: %s", bucket_name)
    logger.info("object_key: %s", object_key)
    return region, bucket_name, object_key

def get_s3_file(s3_path):
    with s3.open(s3_path, "r", encoding='utf-8') as f:
        d = f.read()
    logger.info("success load from %s", s3_path)
    # Firehose writes records back to back ('}{'); insert newlines at the
    # record boundaries so each record lands on its own line.
    s = d.replace('}{', '}\n{')
    # Drop empty lines (e.g. a trailing newline) so literal_eval never sees ''.
    to_lst = [line for line in s.split('\n') if line]
    logger.info("raw total count : %s", len(to_lst))
    return to_lst

def parse_raw_to_csv(to_lst):
    flat_lst = []
    for raw in to_lst:
        # Each record is a dict literal; ast.literal_eval parses it without
        # evaluating arbitrary code.
        items = ast.literal_eval(raw)
        data = items['data']
        flat_lst.append((items['timestamp'], items['wallpadip'], items['dong'],
                         items['ho'], items['source'], data['ui'], data['type'],
                         data['menu'], data['function'], data['value']))
    columns = ['timestamp', 'wallpadip', 'dong', 'ho', 'source',
               'ui', 'type', 'menu', 'function', 'value']
    df = pd.DataFrame(data=flat_lst, columns=columns)
    max_date = df['timestamp'].max()
    logger.info("csv total count : %s for max date %s", len(df), max_date)
    csv = df.to_csv(index=False)  # omit the pandas row index from the output
    return csv, max_date

def get_save_path(target_date, object_key):
    save_bucket_name = os.getenv("PREP_BUCKET")
    # '2021-03-30T02:28:48' -> '2021/03/30' gives a date-partitioned prefix.
    date_part = target_date.split('T')[0].replace('-', '/')
    # Swap ':' for '-' so the timestamp is filename-friendly, and keep the
    # original object name as a suffix to stay unique per source file.
    key_part = "{}_{}.csv".format(target_date.replace(':', '-'), object_key.split('/')[-1])
    path = "{}/{}/{}".format(save_bucket_name, date_part, key_part)
    return path
```
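
Since `parse_raw_to_csv` and `get_save_path` never touch S3, they can be sanity-checked locally without AWS credentials. The sketch below assumes the listing is saved as `handler.py`; the `PREP_BUCKET` value and the record contents are invented for illustration, but the field names match the keys the parser actually reads.

```python
# Minimal local sanity check -- assumes the listing above is saved as handler.py.
import os

os.environ["PREP_BUCKET"] = "homenetwork-data-prep"  # hypothetical bucket name

from handler import parse_raw_to_csv, get_save_path

# One record in the raw format the Lambda receives: a dict literal whose keys
# mirror what parse_raw_to_csv reads. All values here are made up.
sample_records = [
    "{'timestamp': '2021-03-30T02:28:48', 'wallpadip': '10.0.0.1', "
    "'dong': '101', 'ho': '1001', 'source': 'wallpad', "
    "'data': {'ui': 'light', 'type': 'control', 'menu': 'living', "
    "'function': 'switch', 'value': 'on'}}",
]

csv, target_date = parse_raw_to_csv(sample_records)
print(csv)
# e.g. homenetwork-data-prep/2021/03/30/2021-03-30T02-28-48_<object-name>.csv
print(get_save_path(target_date, "2021/03/30/02/homenetwork-data-firehose-1-..."))
```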