"""Unit tests for src.backend.evaluate_model.SummaryGenerator."""
import unittest
from unittest.mock import patch
import pandas as pd
import src.backend.evaluate_model as evaluate_model
class TestSummaryGenerator(unittest.TestCase):
    """Tests for ``evaluate_model.SummaryGenerator``.

    The Hugging Face classes (``AutoTokenizer``, ``AutoModelForCausalLM``)
    and the ``nlp`` pipeline are patched at their use site in
    ``src.backend.model_operations`` so no real model weights are loaded.
    Note: ``@patch`` decorators inject mocks bottom-up, so the decorator
    closest to the method maps to the first mock parameter.
    """

    def setUp(self):
        # Dummy identifiers passed to every SummaryGenerator under test.
        self.model_id = "test_model"
        self.revision = "test_revision"

    @patch("src.backend.model_operations.AutoTokenizer")
    @patch("src.backend.model_operations.AutoModelForCausalLM")
    def test_init(self, model_cls_mock, tokenizer_cls_mock):
        """Constructing the generator loads tokenizer and model exactly once."""
        evaluate_model.SummaryGenerator(self.model_id, self.revision)
        tokenizer_cls_mock.from_pretrained.assert_called_once_with(
            self.model_id, self.revision)
        model_cls_mock.from_pretrained.assert_called_once_with(
            self.model_id, self.revision)

    @patch("src.backend.model_operations.nlp")
    @patch("src.backend.model_operations.AutoTokenizer")
    @patch("src.backend.model_operations.AutoModelForCausalLM")
    def test_generate_summaries(self, model_cls_mock, tokenizer_cls_mock,
                                nlp_mock):
        """generate_summaries yields one summary row per input row."""
        source_rows = {'text': ['text1', 'text2'],
                       'dataset': ['dataset1', 'dataset2']}
        input_df = pd.DataFrame(source_rows)
        gen = evaluate_model.SummaryGenerator(self.model_id, self.revision)
        gen.generate_summaries(input_df)
        self.assertEqual(len(gen.summaries_df), len(input_df))

    @patch("src.backend.model_operations.AutoTokenizer")
    @patch("src.backend.model_operations.AutoModelForCausalLM")
    def test_compute_avg_length(self, model_cls_mock, tokenizer_cls_mock):
        """A four-word summary should produce an average length of 4."""
        gen = evaluate_model.SummaryGenerator(self.model_id, self.revision)
        gen.summaries_df = pd.DataFrame({'source': ['text'],
                                         'summary': ['This is a test.'],
                                         'dataset': ['dataset']})
        gen._compute_avg_length()
        self.assertEqual(gen.avg_length, 4)

    @patch("src.backend.model_operations.AutoTokenizer")
    @patch("src.backend.model_operations.AutoModelForCausalLM")
    def test_compute_answer_rate(self, model_cls_mock, tokenizer_cls_mock):
        """A single non-empty summary gives an answer rate of 1."""
        gen = evaluate_model.SummaryGenerator(self.model_id, self.revision)
        gen.summaries_df = pd.DataFrame({'source': ['text'],
                                         'summary': ['This is a test.'],
                                         'dataset': ['dataset']})
        gen._compute_answer_rate()
        self.assertEqual(gen.answer_rate, 1)

    @patch("src.backend.model_operations.AutoTokenizer")
    @patch("src.backend.model_operations.AutoModelForCausalLM")
    def test_error_rate(self, model_cls_mock, tokenizer_cls_mock):
        """Zero recorded errors over one summary gives an error rate of 0."""
        gen = evaluate_model.SummaryGenerator(self.model_id, self.revision)
        gen.summaries_df = pd.DataFrame({'source': ['text'],
                                         'summary': ['This is a test.'],
                                         'dataset': ['dataset']})
        gen._compute_error_rate(0)
        self.assertEqual(gen.error_rate, 0)
# Allow running this test module directly (e.g. `python test_file.py`);
# unittest.main() discovers and runs every TestCase defined above.
if __name__ == "__main__":
    unittest.main()