"""Evaluate per-gameweek player point predictions against actual results.

Writes one evaluation CSV per gameweek and a running model_eval_metrics.csv summary.
"""
from pathlib import Path

import pandas as pd

PREDICTIONS_DIR = Path('data/predictions/2526')
PLAYERS_GW_DIR = Path('data/raw/2025-26/gws')
EVALUATION_DIR = Path('data/evaluation/2526')
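# Layout assumed by this script: one predictions file per gameweek
# (gw{n}_predictions.csv) under PREDICTIONS_DIR, and the corresponding raw
# gameweek results (gw{n}.csv) under PLAYERS_GW_DIR.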

ERROR_MARGIN = 2  # over-predictions within this many points still count as "acceptable"


def determine_prediction_accuracy(row):
    """Flag a row as an acceptable or bad error based on ERROR_MARGIN."""
    diff = row['error']
    acceptable_err, bad_err = False, False
    # Only over-predictions (negative error: actual < prediction) are flagged;
    # under-predictions are not counted against the model.
    if diff < 0:
        acceptable_err, bad_err = (True, False) if abs(diff) <= ERROR_MARGIN else (False, True)
    return pd.Series([acceptable_err, bad_err], index=["acceptable_err", "bad_err"])


def create_evaluation_metrics(gw_eval: pd.DataFrame, gw: int):
    """Append (or overwrite) one gameweek's summary row in the running metrics CSV."""
    metric_columns = ["gw", "predictions_count", "error_pred_count", "accept_err", "bad_err",
                      "total_err_%", "accept_err_%", "bad_err_%", "max_positive_err", "max_negative_err"]
    metrics_file = EVALUATION_DIR / 'model_eval_metrics.csv'
    if metrics_file.exists():
        model_eval_metrics = pd.read_csv(metrics_file)
    else:
        model_eval_metrics = pd.DataFrame(columns=metric_columns)

    gw_df = gw_eval.copy()
    gw_df[["acceptable_err", "bad_err"]] = gw_df.apply(determine_prediction_accuracy, axis=1)

    # Summing the boolean columns avoids the None that value_counts().get(True)
    # returns when a gameweek has no rows in a category.
    accept_err_count = int(gw_df['acceptable_err'].sum())
    bad_err_count = int(gw_df['bad_err'].sum())
    total_error = accept_err_count + bad_err_count
    total_len = len(gw_df)

    total_err_percentage = round(total_error / total_len * 100, 1)
    acceptable_err_percentage = round(accept_err_count / total_len * 100, 1)
    bad_err_percentage = round(bad_err_count / total_len * 100, 1)
    # mse = np.mean(gw_df['error'] ** 2)
    # mae = np.mean(np.abs(gw_df['error']))
    max_positive_err = gw_df['error'].max()
    max_negative_err = gw_df['error'].min()

    # One row per gameweek, keyed by gw - 1 so re-running a gameweek overwrites its row.
    model_eval_metrics.loc[gw - 1] = pd.Series(
        [gw, total_len, total_error, accept_err_count, bad_err_count,
         total_err_percentage, acceptable_err_percentage, bad_err_percentage,
         max_positive_err, max_negative_err],
        index=metric_columns)

    model_eval_metrics.to_csv(metrics_file, index=False)


def evaluate_model_predictions():
    """Compare each gameweek's predictions with actual points and write evaluation CSVs."""
    # Make sure the output directory exists before writing any results.
    EVALUATION_DIR.mkdir(parents=True, exist_ok=True)

    for gw in range(1, 39):
        preds_file = PREDICTIONS_DIR / f'gw{gw}_predictions.csv'
        actuals_file = PLAYERS_GW_DIR / f'gw{gw}.csv'
        if not preds_file.exists() or not actuals_file.exists():
            # Stop at the first gameweek that has no predictions or results yet,
            # rather than silently swallowing every exception.
            break

        # Load the predictions and actuals.
        preds = pd.read_csv(preds_file).rename(columns={'xP': 'prediction'})
        actuals = pd.read_csv(actuals_file)[['element', 'name', 'total_points']].rename(
            columns={'element': 'id', 'total_points': 'actual'})

        # Merge on player ID to align predictions with actuals.
        merged = pd.merge(actuals, preds, on='id', how='left')

        # Signed error: positive means under-prediction, negative means over-prediction.
        merged['error'] = merged['actual'] - merged['prediction']

        # Save per-gameweek evaluation results and update the summary metrics.
        create_evaluation_metrics(merged, gw)
        merged.to_csv(EVALUATION_DIR / f'gw{gw}.csv', index=False)
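

if __name__ == '__main__':
    # Assumed entry point: run the full-season evaluation when this file is executed
    # directly; drop or adjust this if the module is only ever imported elsewhere.
    evaluate_model_predictions()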
