import json
import logging
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from model import JobRecommendationModel
import matplotlib.pyplot as plt
import seaborn as sns
from datetime import datetime, timezone
import os

# Configure logging
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('job_recommendation_testing.log'),
        logging.StreamHandler()
    ]
)

logger = logging.getLogger(__name__)

class JobRecommendationTester:
    def __init__(self, model_path, test_data_path):
        self.model = JobRecommendationModel()
        self.model.load_model(model_path)
        self.test_data_path = test_data_path
        
        # Create results directory
        self.results_dir = 'models/job_recommendation/test_results'
        os.makedirs(self.results_dir, exist_ok=True)

    def load_test_data(self):
        """Load test data from JSON file"""
        with open(self.test_data_path, 'r') as f:
            return json.load(f)

    def evaluate_model(self):
        """Evaluate model performance on test data"""
        test_data = self.load_test_data()
        X_test, y_test = self.model.preprocess_data(test_data)
        
        # Get predictions; the 0.5 threshold assumes the underlying Keras
        # model emits a single match probability per sample
        y_pred = self.model.model.predict(X_test)
        y_pred_binary = (y_pred > 0.5).astype(int)
        
        # Calculate metrics (cast to built-in float so the report stays
        # JSON-serializable; these sklearn functions typically return numpy scalars)
        metrics = {
            'accuracy': float(accuracy_score(y_test, y_pred_binary)),
            'precision': float(precision_score(y_test, y_pred_binary)),
            'recall': float(recall_score(y_test, y_pred_binary)),
            'f1': float(f1_score(y_test, y_pred_binary))
        }
        
        cm = confusion_matrix(y_test, y_pred_binary)
        
        return metrics, cm, y_test, y_pred
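
    # The seaborn/matplotlib imports above are otherwise unused; the helper
    # below is a minimal sketch that turns the confusion matrix computed in
    # evaluate_model() into a heatmap saved next to the JSON report. The axis
    # labels assume a binary match / no-match target; adjust them if the
    # model is multi-class.
    def plot_confusion_matrix(self, cm):
        """Save a confusion-matrix heatmap to the results directory."""
        plt.figure(figsize=(6, 5))
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
                    xticklabels=['No match', 'Match'],
                    yticklabels=['No match', 'Match'])
        plt.xlabel('Predicted label')
        plt.ylabel('True label')
        plt.title('Job Recommendation Confusion Matrix')
        plot_path = f'{self.results_dir}/confusion_matrix.png'
        plt.savefig(plot_path, bbox_inches='tight')
        plt.close()
        logger.info(f"Confusion matrix plot saved to {plot_path}")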

    def run_full_test(self):
        """Run complete test suite"""
        logger.info("Starting full model testing")
        
        metrics, cm, y_test, y_pred = self.evaluate_model()
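
        # Persist the confusion-matrix heatmap via the plot_confusion_matrix
        # sketch above; remove this call if the plot is not needed.
        self.plot_confusion_matrix(cm)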
        
        # Save results
        report = {
            'metrics': metrics,
            'test_timestamp': datetime.now(timezone.utc).strftime("%Y-%m-%dT%H:%M:%SZ")
        }
        
        with open(f'{self.results_dir}/test_report.json', 'w') as f:
            json.dump(report, f, indent=2)
        
        logger.info("Testing completed. Results saved in test_results directory")
        return report

def main():
    try:
        model_path = 'models/job_recommendation/saved_model/model.keras'
        test_data_path = 'models/job_recommendation/test_data/test_data.json'
        
        tester = JobRecommendationTester(model_path, test_data_path)
        report = tester.run_full_test()
        
        logger.info("\nTest Results Summary:")
        logger.info(f"Accuracy: {report['metrics']['accuracy']:.4f}")
        logger.info(f"Precision: {report['metrics']['precision']:.4f}")
        logger.info(f"Recall: {report['metrics']['recall']:.4f}")
        logger.info(f"F1 Score: {report['metrics']['f1']:.4f}")
        
    except Exception as e:
        logger.error(f"Error during testing: {str(e)}", exc_info=True)
        raise

if __name__ == "__main__":
    main()