lambda_function.py 2.9 KB

import ast
import logging
import os
import urllib.parse

import pandas as pd
import s3fs

logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# Module-level filesystem handle so warm Lambda invocations can reuse the
# connection. Bound to a new name so it no longer shadows the s3fs module.
fs = s3fs.S3FileSystem()

def lambda_handler(event, context):
    region, bucket_name, object_key = parse_event(event)
    # bucket_name = 'homenetwork-data'
    # object_key = '2021/03/30/02/homenetwork-data-firehose-1-2021-03-30-02-28-48-5de99bd1-d81a-4abb-ab0f-7aa47bab1553'
    s3_path = "{}/{}".format(bucket_name, object_key)
    logger.info("start etl for file : %s", s3_path)

    item_lst = get_s3_file(s3_path)
    csv, target_date = parse_raw_to_csv(item_lst)
    save_path = get_save_path(target_date, object_key)

    try:
        with fs.open(save_path, 'w', encoding='utf-8') as f:
            f.write(csv)
    except Exception as e:
        logger.error('Cannot write csv to s3 : %s', e)
    else:
        # Log success only when the write actually succeeded.
        logger.info("save success as file name : %s", save_path)

def parse_event(event):
    record = event["Records"][0]
    region = record["awsRegion"]
    s3_record = record["s3"]
    bucket_name = s3_record["bucket"]["name"]
    # S3 event notifications URL-encode the object key (spaces arrive as '+'),
    # so decode it before building a path.
    object_key = urllib.parse.unquote_plus(s3_record["object"]["key"])
    logger.info("region: %s", region)
    logger.info("bucket_name: %s", bucket_name)
    logger.info("object_key: %s", object_key)
    return region, bucket_name, object_key

def get_s3_file(s3_path):
    with fs.open(s3_path, "r", encoding='utf-8') as f:
        d = f.read()
    logger.info("success load from %s", s3_path)
    # Firehose concatenates JSON records with no delimiter, so the raw file
    # looks like '{...}{...}{...}'. Insert newlines between records and split.
    s = d.replace('}{', '}\n{')
    to_lst = s.split('\n')
    logger.info("raw total count : %s", len(to_lst))
    return to_lst

def parse_raw_to_csv(to_lst):
    flat_lst = []
    for line in to_lst:
        if not line:
            continue  # skip blank lines left over from the split
        # Each record is a dict literal; ast.literal_eval parses it safely.
        items = ast.literal_eval(line)
        data = items['data']
        flat_lst.append((items['timestamp'], items['wallpadip'], items['dong'],
                         items['ho'], items['source'], data['ui'], data['type'],
                         data['menu'], data['function'], data['value']))

    columns = ['timestamp', 'wallpadip', 'dong', 'ho', 'source',
               'ui', 'type', 'menu', 'function', 'value']
    df = pd.DataFrame(data=flat_lst, columns=columns)
    max_date = df['timestamp'].max()
    logger.info("csv total count : %s for max date %s", len(df), max_date)
    csv = df.to_csv()
    return csv, max_date

def get_save_path(target_date, object_key):
    # Output bucket comes from the PREP_BUCKET environment variable; keys are
    # partitioned by date: <bucket>/YYYY/MM/DD/<timestamp>_<source file>.csv
    save_bucket_name = os.getenv("PREP_BUCKET")
    date_part = target_date.split('T')[0].replace('-', '/')
    key_part = "{}_{}.csv".format(target_date.replace(':', '-'), object_key.split('/')[-1])
    path = "{}/{}/{}".format(save_bucket_name, date_part, key_part)
    return path
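
# ---------------------------------------------------------------------------
# Local test sketch (not part of the deployed Lambda). The event below is a
# minimal hand-written stand-in for a real S3 put notification, and the
# region, record values, and PREP_BUCKET name are placeholder assumptions
# chosen only to match the field accesses above. It exercises parse_event,
# parse_raw_to_csv, and get_save_path without touching S3.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    os.environ.setdefault("PREP_BUCKET", "homenetwork-prep")  # placeholder

    sample_event = {
        "Records": [
            {
                "awsRegion": "ap-northeast-2",  # assumed region
                "s3": {
                    "bucket": {"name": "homenetwork-data"},
                    "object": {"key": "2021/03/30/02/sample-object-key"},
                },
            }
        ]
    }
    region, bucket_name, object_key = parse_event(sample_event)

    # One raw record as it would look after get_s3_file splits the Firehose
    # file; every field value here is a made-up placeholder.
    sample_records = [
        "{'timestamp': '2021-03-30T02:28:48', 'wallpadip': '10.0.0.1', "
        "'dong': '101', 'ho': '202', 'source': 'wallpad', "
        "'data': {'ui': 'light', 'type': 'control', 'menu': 'living', "
        "'function': 'power', 'value': 'on'}}",
    ]
    csv, target_date = parse_raw_to_csv(sample_records)
    print(get_save_path(target_date, object_key))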