[Papers] Analysis code all7.py

민광석 · 2024-11-15 14:10
import pandas as pd
import numpy as np
from sqlalchemy import create_engine
from prophet import Prophet
from sklearn.ensemble import RandomForestRegressor
import xgboost as xgb
import lightgbm as lgb
import tensorflow as tf  # needed for tf.keras.callbacks.EarlyStopping in run_lstm
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout
from sklearn.preprocessing import MinMaxScaler, OneHotEncoder
from sklearn.model_selection import GridSearchCV, TimeSeriesSplit
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.feature_extraction import FeatureHasher
import statsmodels.api as sm
from neuralprophet import NeuralProphet
from scipy.stats import ttest_ind, wilcoxon, pearsonr
from scipy.sparse import csr_matrix, hstack
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
import re
import logging
import sys
from multiprocessing import cpu_count
import gc

# Logging setup: write to stdout and to modeling.log
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(levelname)s:%(message)s',
    handlers=[
        logging.StreamHandler(sys.stdout),
        logging.FileHandler("modeling.log")
    ]
)

warnings.filterwarnings("ignore")

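# Loads raw data from MySQL, cleans it, and assembles a sparse feature matrix
# (numeric columns plus one-hot and hashed categorical features).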
class DataProcessor:
    def __init__(self):
        self.df_sales = None
        self.df_life = None
        self.merged_df = None
        self.grouped_df = None

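    # Run a SQL query against the configured MySQL database; returns a DataFrame or None on error.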
    def get_data_from_db(self, query):
        try:
            db_user = ''
            db_password = ''
            db_host = ''
            db_name = ''

            engine = create_engine(f'mysql+pymysql://{db_user}:{db_password}@{db_host}/{db_name}')
            with engine.connect() as conn:
                df = pd.read_sql(query, conn)
                logging.info(f"쿼리 성공: {query.splitlines()[0].strip()}...")
                return df
        except Exception as e:
            logging.error(f"데이터베이스 연결 오류: {e}")
            return None

    def optimize_dtypes(self, df):
        try:
            for col in df.select_dtypes(include=['int64']).columns:
                df[col] = df[col].astype('int32')
            for col in df.select_dtypes(include=['float64']).columns:
                df[col] = df[col].astype('float32')
            return df
        except Exception as e:
            logging.error(f"데이터 타입 최적화 오류: {e}")
            return df

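    # One-hot encode low-cardinality categorical columns into a sparse int8 matrix.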
    def apply_one_hot_encoding(self, df, columns):
        try:
            for col in columns:
                df[col] = df[col].astype(str)
                logging.info(f"{col} 고유값 개수: {df[col].nunique()}")
                logging.info(f"{col} 샘플 값: {df[col].unique()[:5]}")
           
            ohe = OneHotEncoder(drop='first', sparse_output=True, dtype=np.int8)
            ohe_features = ohe.fit_transform(df[columns])
            logging.info(f"원-핫 인코딩 변환 후 형태: {ohe_features.shape}")
            return ohe_features
        except Exception as e:
            logging.error(f"원-핫 인코딩 오류: {e}")
            return None

    def apply_feature_hashing(self, df, columns, n_features=100):  # default n_features set to 100
        try:
            # FeatureHasher expects an iterable of token lists, so build one list
            # of string values per row (vectorized instead of a per-row iloc loop)
            text_features = df[columns].astype(str).values.tolist()

            logging.info(f"Feature hashing input sample: {text_features[:2]}")
            logging.info(f"Feature hashing input size: {len(text_features)} rows")

            hasher = FeatureHasher(n_features=n_features, input_type='string')
            hashed_features = hasher.transform(text_features)
           
            logging.info(f"특성 해싱 변환 후 형태: {hashed_features.shape}")
            return hashed_features
           
        except Exception as e:
            logging.error(f"특성 해싱 오류: {e}")
            logging.error(f"columns: {columns}")
            logging.error(f"데이터 샘플: {df[columns].head()}")
            return None

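    # Parse a YYYYMMDD value into a Timestamp; returns None for unparseable or out-of-range dates.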
    def convert_date(self, date_str):
        try:
            date = pd.to_datetime(str(date_str), format='%Y%m%d')
            if date.year < 1900 or date.year > 2100:
                logging.warning(f"비정상적인 날짜: {date_str}")
                return None
            return date
        except Exception as e:
            logging.error(f"날짜 변환 오류 - 입력값: {date_str}, 오류: {e}")
            return None

    def preprocess_data(self):
        if self.df_sales is None or self.df_life is None:
            logging.error("데이터가 충분하지 않습니다.")
            return None

        try:
            logging.info(f"df_sales 행 수 (원본): {len(self.df_sales)}")
            logging.info(f"df_life 행 수 (원본): {len(self.df_life)}")

            # Convert YYYYMMDD values to datetimes
            self.df_sales['납품요구접수일자'] = self.df_sales['납품요구접수일자'].apply(self.convert_date)
            logging.info(f"Date range: {self.df_sales['납품요구접수일자'].min()} ~ {self.df_sales['납품요구접수일자'].max()}")

            # Drop rows with missing key fields
            self.df_sales.dropna(subset=['물품분류번호', '납품요구접수일자', '수요기관코드', '품명'], inplace=True)
            self.df_life.dropna(subset=['물품분류번호', '내용연수'], inplace=True)

            self.df_sales['물품분류번호'] = self.df_sales['물품분류번호'].astype(str).str.strip()
            self.df_life['물품분류번호'] = self.df_life['물품분류번호'].astype(str).str.strip()
           
            logging.info(f"df_sales 행 수 (dropna 후): {len(self.df_sales)}")
            logging.info(f"df_life 행 수 (dropna 후): {len(self.df_life)}")
           
            logging.info(f"df_sales '물품분류번호' 데이터 타입: {self.df_sales['물품분류번호'].dtype}")
            logging.info(f"df_life '물품분류번호' 데이터 타입: {self.df_life['물품분류번호'].dtype}")

            self.merged_df = pd.merge(self.df_sales, self.df_life, on='물품분류번호', how='inner')
            logging.info(f"병합된 데이터 행 수: {len(self.merged_df)}")

            self.merged_df = self.optimize_dtypes(self.merged_df)

            ohe_columns = ['물품분류번호']
            ohe_features = self.apply_one_hot_encoding(self.merged_df, ohe_columns)
           
            hash_columns = ['수요기관코드', '품명']
            hashed_features = self.apply_feature_hashing(self.merged_df, hash_columns, n_features=100)

            # Exclude the target column (내용연수) from the features to avoid label leakage
            numeric_features = self.merged_df.select_dtypes(include=['int32', 'float32']).drop(columns=['내용연수'], errors='ignore').values
           
            if ohe_features is not None and hashed_features is not None:
                final_features = hstack([
                    csr_matrix(numeric_features),
                    ohe_features,
                    hashed_features
                ], format='csr')  # CSR, so the matrix supports the row slicing used downstream
                logging.info(f"Final preprocessed feature shape: {final_features.shape}")
                return final_features
            else:
                logging.error("특성 변환 중 오류가 발생했습니다.")
                return None

        except Exception as e:
            logging.error(f"데이터 전처리 오류: {e}")
            return None

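# Trains seven forecasters (Prophet, RandomForest, XGBoost, LightGBM, LSTM,
# NeuralProphet, ARIMA) on the same 80/20 chronological split and collects
# each model's test-set predictions in self.predictions.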
class ForecastModels:
    def __init__(self, features, original_df):
        try:
            if features is None or original_df is None:
                raise ValueError("입력 데이터가 없습니다.")
           
            if features.shape[0] != len(original_df):
                raise ValueError("특성과 원본 데이터의 길이가 일치하지 않습니다.")
           
            self.features = features
            self.original_df = original_df
            self.predictions = {}
            self.split_index = int(features.shape[0] * 0.8)
           
            # Row-slice the sparse matrix by position (valid for CSR)
            self.X_train = features[:self.split_index]
            self.X_test = features[self.split_index:]
            self.y_train = original_df['내용연수'].values[:self.split_index]
            self.y_test = original_df['내용연수'].values[self.split_index:]
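            # NOTE: the positional 80/20 split and TimeSeriesSplit below assume
            # the rows are already ordered chronologically.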
           
            # Time-series cross-validation splitter
            self.tscv = TimeSeriesSplit(n_splits=5)
           
            logging.info(f"학습 데이터 형태: {self.X_train.shape}")
            logging.info(f"테스트 데이터 형태: {self.X_test.shape}")
            logging.info(f"학습 레이블 수: {len(self.y_train)}")
            logging.info(f"테스트 레이블 수: {len(self.y_test)}")
           
        except Exception as e:
            logging.error(f"ForecastModels 초기화 오류: {e}")
            raise

    def run_prophet(self):
        try:
            logging.info("Prophet 모델 학습 시작...")
           
            # 날짜별 내용연수의 가중 평균 계산 (구매 건수 기준)
            df_prophet = pd.DataFrame({
                'ds': self.original_df['납품요구접수일자'],
                'y': self.original_df['내용연수'],
                'count': 1  # 각 구매 건수를 카운트
            })
           
            # Log daily purchase statistics
            daily_stats = df_prophet.groupby('ds').agg({
                'y': ['count', 'mean', 'min', 'max'],
                'count': 'sum'
            })
            logging.info(f"일별 구매 통계:\n{daily_stats.head()}")
           
            logging.info(f"Prophet 학습 데이터 크기: {len(df_prophet[:self.split_index])}")
            logging.info(f"Prophet 테스트 데이터 크기: {len(df_prophet[self.split_index:])}")
            logging.info(f"시작 날짜: {df_prophet['ds'].min().strftime('%Y-%m-%d')}")
            logging.info(f"종료 날짜: {df_prophet['ds'].max().strftime('%Y-%m-%d')}")
           
            model = Prophet(
                yearly_seasonality=True,
                weekly_seasonality=True,
                daily_seasonality=True,
                interval_width=0.95
            )
           
            model.fit(df_prophet[['ds', 'y']][:self.split_index])
           
            future_dates = df_prophet[self.split_index:]['ds']
            future = pd.DataFrame({'ds': future_dates})
            forecast = model.predict(future)
            self.predictions['Prophet'] = forecast['yhat'].values
           
            mse = mean_squared_error(self.y_test, self.predictions['Prophet'])
            logging.info(f"Prophet MSE: {mse}")
           
            del model, forecast, future
            gc.collect()
           
        except Exception as e:
            logging.error(f"Prophet 모델 오류: {e}")

    def run_random_forest(self):
        try:
            logging.info("RandomForest 모델 학습 시작...")
            model = RandomForestRegressor(
                n_estimators=100,
                max_depth=10,
                min_samples_split=5,
                min_samples_leaf=2,
                n_jobs=-1,
                random_state=42
            )
           
            # Cross-validation over time-ordered folds
            cv_scores = []
            for train_idx, val_idx in self.tscv.split(self.X_train):
                X_cv_train, X_cv_val = self.X_train[train_idx], self.X_train[val_idx]
                y_cv_train, y_cv_val = self.y_train[train_idx], self.y_train[val_idx]
               
                model.fit(X_cv_train, y_cv_train)
                pred = model.predict(X_cv_val)
                cv_scores.append(mean_squared_error(y_cv_val, pred))
           
            logging.info(f"RandomForest 교차 검증 MSE: {np.mean(cv_scores):.4f}{np.std(cv_scores):.4f})")
           
            model.fit(self.X_train, self.y_train)
            self.predictions['RandomForest'] = model.predict(self.X_test)
           
            mse = mean_squared_error(self.y_test, self.predictions['RandomForest'])
            logging.info(f"RandomForest MSE: {mse:.4f}")
           
            # Feature importances
            feature_importance = pd.DataFrame({
                'feature': range(self.X_train.shape[1]),
                'importance': model.feature_importances_
            })
            logging.info(f"상위 10개 중요 특성:\n{feature_importance.nlargest(10, 'importance')}")
           
            del model
            gc.collect()
           
        except Exception as e:
            logging.error(f"RandomForest 모델 오류: {e}")

    def run_xgboost(self):
        try:
            logging.info("XGBoost 모델 학습 시작...")
            model = xgb.XGBRegressor(
                n_estimators=100,
                learning_rate=0.1,
                max_depth=7,
                n_jobs=-1,
                random_state=42
            )
           
            cv_scores = []
            for train_idx, val_idx in self.tscv.split(self.X_train):
                X_cv_train, X_cv_val = self.X_train[train_idx], self.X_train[val_idx]
                y_cv_train, y_cv_val = self.y_train[train_idx], self.y_train[val_idx]
               
                model.fit(
                    X_cv_train, y_cv_train,
                    eval_set=[(X_cv_val, y_cv_val)],
                    early_stopping_rounds=10,
                    verbose=False
                )
                pred = model.predict(X_cv_val)
                cv_scores.append(mean_squared_error(y_cv_val, pred))
           
            logging.info(f"XGBoost 교차 검증 MSE: {np.mean(cv_scores):.4f}{np.std(cv_scores):.4f})")
           
            model.fit(self.X_train, self.y_train)
            self.predictions['XGBoost'] = model.predict(self.X_test)
           
            mse = mean_squared_error(self.y_test, self.predictions['XGBoost'])
            logging.info(f"XGBoost MSE: {mse:.4f}")
           
            del model
            gc.collect()
           
        except Exception as e:
            logging.error(f"XGBoost 모델 오류: {e}")

    def run_lightgbm(self):
        try:
            logging.info("LightGBM 모델 학습 시작...")
            params = {
                'objective': 'regression',
                'metric': 'mse',
                'num_leaves': 127,
                'max_depth': 7,
                'learning_rate': 0.1,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 5,
                'min_data_in_leaf': 100,
                'num_iterations': 100,
                'verbose': -1,
                'force_row_wise': True,
                'n_jobs': -1
            }
           
            model = lgb.LGBMRegressor(**params)
           
            cv_scores = []
            for train_idx, val_idx in self.tscv.split(self.X_train):
                X_cv_train = self.X_train[train_idx]
                X_cv_val = self.X_train[val_idx]
                y_cv_train = self.y_train[train_idx]
                y_cv_val = self.y_train[val_idx]
               
                model.fit(
                    X_cv_train, y_cv_train,
                    eval_set=[(X_cv_val, y_cv_val)],
                    # lightgbm >= 4 moved early stopping and log control into callbacks
                    callbacks=[lgb.early_stopping(10, verbose=False)]
                )
               
                pred = model.predict(X_cv_val)
                cv_scores.append(mean_squared_error(y_cv_val, pred))
           
            logging.info(f"LightGBM 교차 검증 MSE: {np.mean(cv_scores):.4f}{np.std(cv_scores):.4f})")
           
            model.fit(self.X_train, self.y_train)
            self.predictions['LightGBM'] = model.predict(self.X_test)
           
            mse = mean_squared_error(self.y_test, self.predictions['LightGBM'])
            logging.info(f"LightGBM MSE: {mse:.4f}")
           
            del model
            gc.collect()
           
        except Exception as e:
            logging.error(f"LightGBM 모델 오류: {e}")

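    # LSTM over dense, min-max-scaled features plus simple calendar features;
    # each sample is treated as a sequence of length 1.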
    def run_lstm(self):
        try:
            logging.info("LSTM 모델 학습 시작...")
           
            # 데이터 정규화
            scaler = MinMaxScaler()
            X_train_scaled = scaler.fit_transform(self.X_train.toarray())
            X_test_scaled = scaler.transform(self.X_test.toarray())
           
            # Add calendar features derived from the delivery-request date
            dates_train = self.original_df['납품요구접수일자'].values[:self.split_index]
            dates_test = self.original_df['납품요구접수일자'].values[self.split_index:]
           
            def extract_date_features(dates):
                return np.column_stack([
                    pd.to_datetime(dates).dayofweek,
                    pd.to_datetime(dates).dayofyear,
                    pd.to_datetime(dates).month
                ])
           
            date_features_train = extract_date_features(dates_train)
            date_features_test = extract_date_features(dates_test)
           
            X_train_with_dates = np.hstack([X_train_scaled, date_features_train])
            X_test_with_dates = np.hstack([X_test_scaled, date_features_test])
           
            X_train_reshaped = X_train_with_dates.reshape((X_train_with_dates.shape[0], 1, X_train_with_dates.shape[1]))
            X_test_reshaped = X_test_with_dates.reshape((X_test_with_dates.shape[0], 1, X_test_with_dates.shape[1]))
           
            model = Sequential([
                LSTM(100, input_shape=(1, X_train_with_dates.shape[1]), return_sequences=True),
                Dropout(0.2),
                LSTM(50),
                Dropout(0.2),
                Dense(25, activation='relu'),
                Dense(1)
            ])
           
            model.compile(optimizer='adam', loss='mse')
           
            early_stopping = tf.keras.callbacks.EarlyStopping(
                monitor='val_loss',
                patience=5,
                restore_best_weights=True
            )
           
            history = model.fit(
                X_train_reshaped,
                self.y_train,
                epochs=50,
                batch_size=64,
                validation_split=0.2,
                callbacks=[early_stopping],
                verbose=1
            )
           
            self.predictions['LSTM'] = model.predict(X_test_reshaped).flatten()
           
            mse = mean_squared_error(self.y_test, self.predictions['LSTM'])
            logging.info(f"LSTM MSE: {mse:.4f}")
           
            plt.figure(figsize=(10, 6))
            plt.plot(history.history['loss'], label='Training Loss')
            plt.plot(history.history['val_loss'], label='Validation Loss')
            plt.title('LSTM Model Loss')
            plt.xlabel('Epoch')
            plt.ylabel('Loss')
            plt.legend()
            plt.savefig('lstm_training_history.png')
            plt.close()
           
            del model, X_train_scaled, X_test_scaled
            gc.collect()
           
        except Exception as e:
            logging.error(f"LSTM 모델 오류: {e}")

    def run_neural_prophet(self):
        try:
            logging.info("NeuralProphet 모델 학습 시작...")
            df_prophet = pd.DataFrame({
                'ds': self.original_df['납품요구접수일자'],
                'y': self.original_df['내용연수'],
                'count': 1
            })
           
            daily_stats = df_prophet.groupby('ds').agg({
                'y': ['count', 'mean', 'min', 'max'],
                'count': 'sum'
            })
            logging.info(f"NeuralProphet 일별 통계:\n{daily_stats.head()}")
           
            model = NeuralProphet(
                batch_size=64,
                learning_rate=0.01,
                epochs=50,
                daily_seasonality=True,
                weekly_seasonality=True,
                yearly_seasonality=True
            )
           
            model.fit(df_prophet[['ds', 'y']][:self.split_index], freq='D')

            # Build the future frame from the same (ds, y) columns used for fitting,
            # then keep the last len(y_test) forecast rows (the future periods)
            future = model.make_future_dataframe(df_prophet[['ds', 'y']][:self.split_index], periods=len(self.y_test))
            forecast = model.predict(future)

            self.predictions['NeuralProphet'] = forecast['yhat1'].values[-len(self.y_test):]
           
            mse = mean_squared_error(self.y_test, self.predictions['NeuralProphet'])
            logging.info(f"NeuralProphet MSE: {mse:.4f}")
           
            del model, forecast, future
            gc.collect()
           
        except Exception as e:
            logging.error(f"NeuralProphet 모델 오류: {e}")

    def run_arima(self):
        try:
            logging.info("ARIMA 모델 학습 시작...")
           
            ts_data = pd.DataFrame({
                'date': self.original_df['납품요구접수일자'],
                'value': self.original_df['내용연수'],
                'count': 1
            })
           
            daily_data = ts_data.groupby('date').agg({
                'value': ['count', 'mean', 'sum'],
                'count': 'sum'
            })
            logging.info(f"ARIMA 일별 통계:\n{daily_data.head()}")
           
            ts_series = pd.Series(
                self.original_df['내용연수'].values[:self.split_index],
                index=self.original_df['납품요구접수일자'].values[:self.split_index]
            )
           
            model = sm.tsa.ARIMA(ts_series, order=(2,1,2))
            results = model.fit()
           
            forecast = results.forecast(steps=len(self.y_test))
            self.predictions['ARIMA'] = forecast.values
           
            mse = mean_squared_error(self.y_test, self.predictions['ARIMA'])
            logging.info(f"ARIMA MSE: {mse:.4f}")
           
            del model, results, forecast
            gc.collect()
           
        except Exception as e:
            logging.error(f"ARIMA 모델 오류: {e}")

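    # Run every model in sequence; a failure in one model is logged and skipped.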
    def run_all_models(self):
        try:
            logging.info("모든 모델 학습 시작...")
           
            models = {
                'Prophet': self.run_prophet,
                'RandomForest': self.run_random_forest,
                'XGBoost': self.run_xgboost,
                'LightGBM': self.run_lightgbm,
                'LSTM': self.run_lstm,
                'NeuralProphet': self.run_neural_prophet,
                'ARIMA': self.run_arima
            }
           
            for name, func in models.items():
                try:
                    logging.info(f"{name} 모델 학습 중...")
                    func()
                    gc.collect()
                except Exception as e:
                    logging.error(f"{name} 모델 실행 실패: {e}")
                    continue
           
            logging.info("모든 모델 학습 완료")
           
            # 모델 성능 요약
            for model_name, predictions in self.predictions.items():
                if len(predictions) == len(self.y_test):
                    mse = mean_squared_error(self.y_test, predictions)
                    mae = mean_absolute_error(self.y_test, predictions)
                    r2 = r2_score(self.y_test, predictions)
                    logging.info(f"{model_name} 최종 성능:")
                    logging.info(f"- MSE: {mse:.4f}")
                    logging.info(f"- MAE: {mae:.4f}")
                    logging.info(f"- R2: {r2:.4f}")
           
        except Exception as e:
            logging.error(f"모델 학습 중 오류 발생: {e}")
            import traceback
            logging.error(traceback.format_exc())

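# Writes CSV reports (performance metrics, statistical tests, descriptive stats)
# and a prediction-vs-actual plot for each model's test-set predictions.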
class ReportGenerator:
    def __init__(self, original_df, predictions, split_index):
        self.original_df = original_df
        self.predictions = predictions
        self.split_index = split_index
        self.y_test = original_df['내용연수'].values[split_index:]

    def generate_model_performance_report(self):
        try:
            logging.info("모델 성능 보고서 생성 시작...")
            performance_metrics = {}
           
            for model_name, pred in self.predictions.items():
                if len(pred) == len(self.y_test):
                    metrics = {
                        'MSE': mean_squared_error(self.y_test, pred),
                        'MAE': mean_absolute_error(self.y_test, pred),
                        'R2': r2_score(self.y_test, pred)
                    }
                    performance_metrics[model_name] = metrics
           
            # Convert the results to a DataFrame and save as CSV
            df_metrics = pd.DataFrame(performance_metrics).T
            df_metrics.to_csv('model_performance_metrics.csv')
            logging.info("모델 성능 보고서가 생성되었습니다.")
            logging.info("\n" + str(df_metrics))
           
        except Exception as e:
            logging.error(f"모델 성능 보고서 생성 오류: {e}")

    def generate_statistical_analysis_report(self):
        try:
            logging.info("통계 분석 보고서 생성 시작...")
            statistical_tests = {}
           
            for model_name, pred in self.predictions.items():
                if len(pred) == len(self.y_test):
                    # t-test
                    t_stat, t_pval = ttest_ind(self.y_test, pred)
                    # Wilcoxon test
                    w_stat, w_pval = wilcoxon(self.y_test, pred)
                    # Pearson correlation
                    corr, p_val = pearsonr(self.y_test, pred)
                   
                    statistical_tests[model_name] = {
                        't_statistic': t_stat,
                        't_pvalue': t_pval,
                        'wilcoxon_statistic': w_stat,
                        'wilcoxon_pvalue': w_pval,
                        'correlation': corr,
                        'correlation_pvalue': p_val
                    }
           
            df_stats = pd.DataFrame(statistical_tests).T
            df_stats.to_csv('statistical_analysis.csv')
            logging.info("통계 분석 보고서가 생성되었습니다.")
            logging.info("\n" + str(df_stats))
           
        except Exception as e:
            logging.error(f"통계 분석 보고서 생성 오류: {e}")

    def generate_detailed_statistical_analysis_report(self):
        try:
            logging.info("상세 통계 분석 보고서 생성 시작...")
            detailed_stats = {}
           
            for model_name, pred in self.predictions.items():
                if len(pred) == len(self.y_test):
                    detailed_stats[model_name] = {
                        'Mean_Actual': np.mean(self.y_test),
                        'Mean_Predicted': np.mean(pred),
                        'Std_Actual': np.std(self.y_test),
                        'Std_Predicted': np.std(pred),
                        'Min_Actual': np.min(self.y_test),
                        'Min_Predicted': np.min(pred),
                        'Max_Actual': np.max(self.y_test),
                        'Max_Predicted': np.max(pred)
                    }
           
            df_detailed = pd.DataFrame(detailed_stats).T
            df_detailed.to_csv('detailed_statistical_analysis.csv')
            logging.info("상세 통계 분석 보고서가 생성되었습니다.")
            logging.info("\n" + str(df_detailed))
           
        except Exception as e:
            logging.error(f"상세 통계 분석 보고서 생성 오류: {e}")

    def generate_visualizations(self):
        try:
            logging.info("시각화 생성 시작...")
            for model_name, pred in self.predictions.items():
                if len(pred) == len(self.y_test):
                    plt.figure(figsize=(12, 6))
                    dates = pd.to_datetime(self.original_df['납품요구접수일자'].values[self.split_index:])
                   
                    plt.plot(dates, self.y_test, label='Actual', color='blue')
                    plt.plot(dates, pred, label=f'{model_name}', color='red')
                    plt.title(f'{model_name} Prediction Results')
                    plt.xlabel('Date')
                    plt.ylabel('Useful life (years)')
                    plt.legend()
                    plt.xticks(rotation=45)
                    plt.tight_layout()
                    plt.savefig(f'{model_name}_prediction_plot.png')
                    plt.close()
           
            logging.info("시각화가 생성되었습니다.")
           
        except Exception as e:
            logging.error(f"시각화 생성 오류: {e}")

    def generate_all_reports(self):
        self.generate_model_performance_report()
        self.generate_statistical_analysis_report()
        self.generate_detailed_statistical_analysis_report()
        self.generate_visualizations()


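# Entry point: load both tables from MySQL, preprocess, train all models, and emit reports.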
if __name__ == "__main__":
    try:
        # Data processing
        data_processor = DataProcessor()
       
        # SQL queries
        query_sales = """
        SELECT
            CAST(납품요구접수일자 AS CHAR) AS 납품요구접수일자,
            CAST(물품분류번호 AS CHAR) AS 물품분류번호,
            품명,
            수요기관코드
        FROM
            g2b_data
        """
        query_life = """
        SELECT
            물품분류번호,
            내용연수
        FROM
            g2blifedb
        """
       
        # Load and process the data
        logging.info("Loading sales data...")
        data_processor.df_sales = data_processor.get_data_from_db(query_sales)
        logging.info(f"Sales data loaded. Rows: {len(data_processor.df_sales) if data_processor.df_sales is not None else 0}")

        logging.info("Loading useful-life data...")
        data_processor.df_life = data_processor.get_data_from_db(query_life)
        logging.info(f"Useful-life data loaded. Rows: {len(data_processor.df_life) if data_processor.df_life is not None else 0}")
       
        df_sparse = data_processor.preprocess_data()

        if df_sparse is not None and df_sparse.shape[0] > 0:
            # Model predictions
            forecast_models = ForecastModels(df_sparse, data_processor.merged_df)
            forecast_models.run_all_models()

            # Report generation
            report_generator = ReportGenerator(data_processor.merged_df, forecast_models.predictions, forecast_models.split_index)
            report_generator.generate_all_reports()

            # Free memory
            del data_processor, forecast_models
            gc.collect()
        else:
            logging.error("유효한 데이터가 없습니다.")
    except Exception as e:
        logging.error(f"메인 실행 오류: {e}")
Comments (1)

Comment by 민광석 (2024-11-15 15:44):
2024-11-15 15:42:15,267 INFO:1/7: Training Prophet model...
2024-11-15 15:42:15,267 INFO:Starting Prophet training...
2024-11-15 15:42:15,373 INFO:Prophet training size: 13081363
2024-11-15 15:42:15,373 INFO:Prophet test size: 3270341
2024-11-15 15:42:15,384 INFO:Start date: 1970-01-01 00:00:00.020050103
2024-11-15 15:42:15,393 INFO:End date: 1970-01-01 00:00:00.020231231
2024-11-15 15:43:11,339 DEBUG:input tempfile: /var/folders/kv/hdvf6vwx57x32gx24h1l50dr0000gn/T/tmp09rhwknm/rsy3cyo5.json
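
Note on the start/end dates above: "1970-01-01 00:00:00.020050103" indicates the date column reached Prophet as raw integers, which pandas interprets as nanoseconds since the Unix epoch. A minimal sketch of the difference, assuming YYYYMMDD values stored as integers (the raw series below is illustrative, not from the real table):

import pandas as pd

raw = pd.Series([20050103, 20231231])  # hypothetical YYYYMMDD values stored as integers

# Without an explicit format, integers are read as nanoseconds since 1970-01-01:
print(pd.to_datetime(raw))  # 1970-01-01 00:00:00.020050103, 1970-01-01 00:00:00.020231231

# Casting to str with an explicit format parses calendar dates, as convert_date() does:
print(pd.to_datetime(raw.astype(str), format='%Y%m%d'))  # 2005-01-03, 2023-12-31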