feat: add ifeval parser
Browse files- llmdataparser/ifeval_parser.py +94 -0
- tests/test_ifeval_parser.py +91 -0
llmdataparser/ifeval_parser.py
ADDED
@@ -0,0 +1,94 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
from dataclasses import dataclass
|
2 |
+
from typing import Any, ClassVar, List
|
3 |
+
|
4 |
+
from llmdataparser.base_parser import HuggingFaceDatasetParser, HuggingFaceParseEntry
|
5 |
+
from llmdataparser.prompts import IFEVAL_SYSTEM_PROMPT
|
6 |
+
|
7 |
+
|
8 |
+
@dataclass(frozen=True, kw_only=True, slots=True)
class IFEvalParseEntry(HuggingFaceParseEntry):
    """Custom entry class for IFEval, with fields specific to this dataset parser."""

    # Integer identifier of the example in the IFEval dataset.
    key: int
    # IDs of the verifiable instructions this prompt must satisfy.
    instruction_id_list: list[str]
    # Extra per-entry arguments from the dataset row — presumably
    # per-instruction parameters; TODO confirm exact schema against the dataset.
    kwargs: dict[str, Any]

    @classmethod
    def create(
        cls,
        prompt: str,
        answer: str,
        raw_question: str,
        raw_answer: str,
        key: int,
        instruction_id_list: list[str],
        kwargs: dict[str, Any],
        task_name: str,
    ) -> "IFEvalParseEntry":
        """Build an IFEvalParseEntry from explicit field values.

        Thin keyword-forwarding constructor; kept as the canonical way to
        construct entries so call sites stay uniform across parsers.
        """
        return cls(
            prompt=prompt,
            answer=answer,
            raw_question=raw_question,
            raw_answer=raw_answer,
            key=key,
            instruction_id_list=instruction_id_list,
            kwargs=kwargs,
            task_name=task_name,
        )
|
38 |
+
|
39 |
+
|
40 |
+
class IFEvalDatasetParser(HuggingFaceDatasetParser[IFEvalParseEntry]):
    """Parser for the IFEval dataset."""

    _data_source: ClassVar[str] = "google/IFEval"
    _default_task: ClassVar[str] = "default"
    _task_names: ClassVar[list[str]] = ["default"]
    _default_system_prompt: ClassVar[str] = IFEVAL_SYSTEM_PROMPT

    def process_entry(
        self, row: dict[str, Any], task_name: str | None = None, **kwargs: Any
    ) -> IFEvalParseEntry:
        """Process a single IFEval entry."""
        # The dataset's "prompt" column doubles as the raw question.
        raw_question = row["prompt"]

        # Prepend the configured system prompt to form the final prompt.
        full_prompt = f"{self._system_prompt}\n\n{raw_question}"

        # IFEval rows carry no reference answers, so both answer fields
        # are empty-string placeholders.
        # Honor an explicit task_name; otherwise fall back to the current task.
        if task_name:
            resolved_task = task_name
        else:
            resolved_task = self._get_current_task(row)

        return IFEvalParseEntry.create(
            prompt=full_prompt,
            answer="",
            raw_question=raw_question,
            raw_answer="",
            key=row["key"],
            instruction_id_list=row["instruction_id_list"],
            kwargs=row["kwargs"],
            task_name=resolved_task,
        )
|
79 |
+
|
80 |
+
|
81 |
+
if __name__ == "__main__":
    # Example usage: load, parse, and display the first parsed entry.
    parser = IFEvalDatasetParser()
    parser.load()
    parser.parse()

    parsed_data = parser.get_parsed_data
    if parsed_data:
        first_entry = parsed_data[0]
        print("\nExample parsed entry:")
        print(f"Key: {first_entry.key}")
        print(f"Prompt: {first_entry.prompt}")
        print(f"Instruction IDs: {first_entry.instruction_id_list}")
        print(f"kwargs: {first_entry.kwargs}")
|
tests/test_ifeval_parser.py
ADDED
@@ -0,0 +1,91 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import pytest
|
2 |
+
|
3 |
+
from llmdataparser.ifeval_parser import IFEvalDatasetParser, IFEvalParseEntry
|
4 |
+
|
5 |
+
|
6 |
+
@pytest.fixture
def sample_ifeval_entries():
    """Create sample IFEval dataset entries for testing."""
    factorial_row = {
        "key": 1,
        "prompt": "Write a function to calculate factorial.",
        "instruction_id_list": ["math_001", "programming_001"],
        "kwargs": {"difficulty": "medium", "category": "mathematics"},
    }
    quantum_row = {
        "key": 2,
        "prompt": "Explain quantum computing.",
        "instruction_id_list": ["physics_001"],
        "kwargs": {"difficulty": "hard", "category": "physics"},
    }
    return [factorial_row, quantum_row]
|
23 |
+
|
24 |
+
|
25 |
+
@pytest.fixture
def ifeval_parser():
    """Provide a fresh IFEval parser instance."""
    parser = IFEvalDatasetParser()
    return parser
|
29 |
+
|
30 |
+
|
31 |
+
def test_ifeval_parse_entry_creation_valid():
    """Test valid creation of IFEvalParseEntry."""
    entry = IFEvalParseEntry.create(
        prompt="Test system prompt\n\nTest instruction",
        answer="",  # IFEval entries carry no reference answer
        raw_question="Test instruction",
        raw_answer="",
        key=1,
        instruction_id_list=["test_001", "test_002"],
        kwargs={"difficulty": "easy"},
        task_name="default",
    )

    assert isinstance(entry, IFEvalParseEntry)
    assert entry.task_name == "default"
    assert entry.key == 1
    assert entry.prompt == "Test system prompt\n\nTest instruction"
    assert entry.answer == ""
    assert entry.instruction_id_list == ["test_001", "test_002"]
    assert entry.kwargs == {"difficulty": "easy"}
|
51 |
+
|
52 |
+
|
53 |
+
def test_process_entry_ifeval(ifeval_parser, sample_ifeval_entries):
    """Test processing entries in IFEval parser."""
    first_row = sample_ifeval_entries[0]
    entry = ifeval_parser.process_entry(first_row)

    assert isinstance(entry, IFEvalParseEntry)
    assert entry.task_name == "default"
    assert entry.key == 1
    assert entry.raw_question == "Write a function to calculate factorial."
    assert entry.instruction_id_list == ["math_001", "programming_001"]
    assert entry.kwargs == {"difficulty": "medium", "category": "mathematics"}
    assert entry.answer == ""  # IFEval provides no reference answers
|
64 |
+
|
65 |
+
|
66 |
+
def test_parser_initialization(ifeval_parser):
    """Test initialization of IFEval parser."""
    expected_link = "https://huggingface.co/datasets/google/IFEval"

    assert ifeval_parser._data_source == "google/IFEval"
    assert ifeval_parser._default_task == "default"
    assert ifeval_parser.task_names == ["default"]
    assert ifeval_parser.get_huggingface_link == expected_link
|
75 |
+
|
76 |
+
|
77 |
+
@pytest.mark.integration
def test_load_dataset(ifeval_parser):
    """Test loading the IFEval dataset."""
    # Network-backed: pulls the real dataset's train split.
    ifeval_parser.load(split="train")

    assert ifeval_parser.raw_data is not None
    assert ifeval_parser.split_names == ["train"]
    assert ifeval_parser._current_task == "default"
|
84 |
+
|
85 |
+
|
86 |
+
def test_parser_string_representation(ifeval_parser):
    """Test string representation of IFEval parser."""
    text = str(ifeval_parser)
    for fragment in ("IFEvalDatasetParser", "google/IFEval", "not loaded"):
        assert fragment in text
|