#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
# @Version     : Python 3.11.4
# @Software    : Sublime Text 4
# @Author      : StudentCWZ
# @Email       : StudentCWZ@outlook.com
# @Date        : 2023/11/20 09:57
# @File        : log.py
# @Description : Defines the Log dao for the application.
"""

from datetime import datetime
from typing import Generator

from loguru import logger
from sqlalchemy import text

from application.libs.helper import MySQLHelper
from application.models import Log
from application.utils import ElasticsearchUtil, ParseUtil
from application.extensions.init_sqlalchemy import db

created_partitions = set()  # Module-level cache of partition table names already created in this process, so DDL is issued at most once per partition


class LogDao:
    """
    Data Access Object for logs.

    Provides class methods to pull log documents from Elasticsearch
    (first page plus scroll pagination), load an intent mapping from
    MySQL, and persist the parsed results into monthly-partitioned
    tables through the shared SQLAlchemy session.
    """

    @classmethod
    def get_data_from_es(cls, index=None, dsl=None, sid=None) -> dict:
        """
        Get data from Elasticsearch by either scroll searching or direct searching.

        Args:
            index (str): The name of the Elasticsearch index.
            dsl (dict): The DSL query for Elasticsearch.
            sid (str): The scroll id for Elasticsearch scroll search.

        Returns:
            dict: The raw response returned from Elasticsearch.

        Raises:
            SystemError: If neither ``sid`` nor both ``index`` and ``dsl``
                are provided.
        """
        # Scroll continuation takes precedence over a fresh search.
        if sid is not None:
            return ElasticsearchUtil.scroll_search(sid)
        if index is not None and dsl is not None:
            return ElasticsearchUtil.search(index, dsl)
        raise SystemError('Could not get data from Elasticsearch')

    @classmethod
    def get_mdata(cls, data: dict) -> list:
        """
        Get metadata (hit documents) from the data returned by Elasticsearch.

        Args:
            data (dict): The data returned from Elasticsearch.

        Returns:
            list: The metadata extracted from the data.

        Raises:
            SystemError: If the metadata is empty.
        """
        # Use a default for the outer lookup so a malformed response yields
        # the "empty" error below instead of an AttributeError.
        mdata = data.get('hits', {}).get('hits')
        if not mdata:
            logger.error('the mdata is an empty list ...')
            raise SystemError('the mdata is an empty list ...')
        return mdata

    @classmethod
    def get_intent_from_mysql(cls, sql: str, cfg: dict) -> list:
        """
        Get the intent mapping from MySQL using the provided SQL.

        Args:
            sql (str): The SQL query to execute.
            cfg (dict): The configuration for MySQL.

        Returns:
            list: The intent mapping list (first column of each row).
        """
        with MySQLHelper(**cfg) as helper:
            result = helper.execute(sql)
        return [row[0] for row in result]

    @classmethod
    def batch_save(cls, objects: Generator) -> None:
        """
        Persist a stream of log dicts, creating monthly partitions on demand.

        Args:
            objects (Generator): Yields dicts of ``Log`` constructor keyword
                arguments; each must carry a ``date_time`` string formatted
                ``%Y-%m-%d %H:%M:%S``.

        NOTE(review): the DDL below uses PostgreSQL declarative-partition
        syntax although surrounding docs mention MySQL — confirm the target
        database.
        """
        for obj in objects:
            log = Log(**obj)
            # Parse the record's timestamp to pick its monthly partition.
            date_time_obj = datetime.strptime(log.date_time, '%Y-%m-%d %H:%M:%S')
            partition_name = f"{log.__tablename__}_{date_time_obj.strftime('%Y_%m')}"

            # Issue the DDL at most once per partition per process; the
            # module-level set is only mutated (.add), so no `global` needed.
            if partition_name not in created_partitions:
                month_start = date_time_obj.strftime('%Y-%m-01')
                # Identifiers cannot be bound as parameters; both interpolated
                # values derive from a parsed datetime, so no untrusted text
                # reaches the DDL.
                db.session.execute(text(
                    f"CREATE TABLE IF NOT EXISTS {partition_name} "
                    f"PARTITION OF {log.__tablename__} "
                    f"FOR VALUES FROM ('{month_start}') "
                    f"TO ('{month_start}'::date + interval '1 month');"
                ))
                created_partitions.add(partition_name)

            db.session.add(log)
        try:
            db.session.commit()
        except Exception:
            # Keep the session usable after a failed commit, then re-raise.
            db.session.rollback()
            raise

    @classmethod
    def process_and_save_data(cls, lst: list, mapping_list: list) -> None:
        """
        Process the given list using the mapping list and save the result to the database.

        Args:
            lst (list): The list to process.
            mapping_list (list): The mapping list to use for processing.
        """
        if not lst:
            return
        result_generator = ParseUtil(mapping_list=mapping_list).filter(lst)

        # Save the parsed records in one batch.
        cls.batch_save(result_generator)

    @classmethod
    def parse(cls, start: str, end: str, index: str, sql: str, options: dict) -> int:
        """
        Parse logs from Elasticsearch and save them to MySQL.

        Args:
            start (str): The start date for the logs.
            end (str): The end date for the logs.
            index (str): The Elasticsearch index to get logs from.
            sql (str): The SQL query to get the intent mapping from MySQL.
            options (dict): The configuration for MySQL.

        Returns:
            int: The total number of logs matched by the query.

        Raises:
            SystemError: If a scroll batch fails while fetching or saving.
        """
        # Build the DSL query for the given date range.
        dsl = ElasticsearchUtil.dsl(start, end)

        # First page of results via a regular search.
        data = cls.get_data_from_es(index=index, dsl=dsl)

        # Extract the hit documents from the first page.
        mdata = cls.get_mdata(data)

        # Total number of matching documents reported by Elasticsearch;
        # defaults guard against a malformed response shape.
        total = data.get('hits', {}).get('total', {}).get('value', 0)
        logger.debug(f'The numbers of data by searching data from ES: {total}')

        # Log the start of the searching and saving process.
        logger.debug('The data is inserting ...')

        # Intent mapping used to classify each log record.
        mapping_list = cls.get_intent_from_mysql(sql, options)

        # Process and save the first page.
        cls.process_and_save_data(mdata, mapping_list)

        # Scroll id for fetching the remaining pages.
        scroll_id = data.get('_scroll_id')

        try:
            # The bound deliberately overshoots by one page; an exhausted
            # scroll returns an empty page and we stop early below.
            for _ in range(int(total / dsl.get('size', 5000) + 1)):
                res = cls.get_data_from_es(sid=scroll_id)
                lst = res.get('hits', {}).get('hits')

                if not lst:
                    # Empty page: the scroll is exhausted, stop instead of
                    # issuing useless further scroll calls.
                    break

                cls.process_and_save_data(lst, mapping_list)
        except Exception as e:
            # Chain the original exception for easier debugging.
            logger.error(f'The error: {e}')
            raise SystemError('Failed to parse and save logs') from e
        else:
            # Log the success of the process.
            logger.debug('The process of inserting data succeed!')
        finally:
            # Log the end of the process.
            logger.debug('The inserting of the data finished!')

        return total