Upload folder using huggingface_hub
- api.py +17 -15
- card.py +0 -3
- collections.py +9 -0
- data.py +1 -1
- fusion.py +1 -5
- image_operators.py +3 -1
- inference.py +156 -76
- llm_as_judge_chat_templates.py +11 -2
- llm_as_judge_constants.py +24 -12
- llm_as_judge_from_template.py +3 -3
- loaders.py +249 -239
- metric.py +1 -1
- metrics.py +711 -162
- operator.py +7 -2
- operators.py +45 -28
- serializers.py +9 -1
- settings_utils.py +5 -2
- db_utils.py → sql_utils.py +241 -14
- standard.py +25 -22
- string_operators.py +11 -4
- struct_data_operators.py +9 -4
- version.py +1 -1
api.py
CHANGED
@@ -1,7 +1,6 @@
import hashlib
import inspect
import json
-import tempfile
from datetime import datetime
from functools import lru_cache
from typing import Any, Dict, List, Optional, Union
@@ -135,36 +134,39 @@ def create_dataset(


def _source_to_dataset(
-    source: SourceOperator,
+    source: SourceOperator,
+    split=None,
+    use_cache=False,
+    streaming=False,
):
    from .dataset import Dataset as UnitxtDataset

    stream = source()

-    cache_dir = dir_to_be_deleted if not use_cache else None
+    try:
    ds_builder = UnitxtDataset(
        dataset_name="unitxt",
        config_name="recipe-" + short_hex_hash(repr(source)),
-        hash=hash(repr(source)),
        version=constants.version,
-        cache_dir=cache_dir,
    )
    if split is not None:
        stream = {split: stream[split]}
    ds_builder._generators = stream

+        ds_builder.download_and_prepare(
+            verification_mode="no_checks",
+            download_mode=None if use_cache else "force_redownload",
+        )
+
+        if streaming:
+            return ds_builder.as_streaming_dataset(split=split)
+
+        return ds_builder.as_dataset(
+            split=split, run_post_process=False, verification_mode="no_checks"
+        )
+
+    except DatasetGenerationError as e:
+        raise e.__cause__


def load_dataset(
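A minimal sketch of how the reworked builder might be driven, assuming a SourceOperator produced elsewhere (here called `recipe`, a hypothetical name); the keyword arguments mirror the new `_source_to_dataset` signature in the hunk above, everything else is illustrative.

    # Sketch only: `recipe` stands for any SourceOperator instance.
    test_set = _source_to_dataset(
        source=recipe,
        split="test",       # keep a single split
        use_cache=False,    # False triggers download_mode="force_redownload" above
        streaming=False,    # True would return ds_builder.as_streaming_dataset(...)
    )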
card.py
CHANGED
@@ -21,8 +21,6 @@ class TaskCard(Artifact):
        specifies the fields (of the already (pre)processed instance) making the inputs, the fields making the outputs, and the metrics to be used for evaluating the model output.
    templates:
        format strings to be applied on the input fields (specified by the task) and the output fields. The template also carries the instructions and the list of postprocessing steps, to be applied to the model output.
-    default_template:
-        a default template for tasks with very specific task dataset specific template
    """

    loader: Loader
@@ -31,5 +29,4 @@ class TaskCard(Artifact):
    templates: Union[
        TemplatesDict, TemplatesList, Dict[str, Template], List[Template]
    ] = None
-    default_template: Template = None
    sampler: Sampler = OptionalField(default_factory=RandomSampler)
collections.py
CHANGED
@@ -22,6 +22,10 @@ class Collection(Artifact):
    def keys(self) -> List[Hashable]:
        pass

+    @abstractmethod
+    def __len__(self):
+        pass
+

class ListCollection(Collection):
    items: List[Artifact] = field(default_factory=list)
@@ -48,6 +52,11 @@ class DictCollection(Collection):
    def keys(self) -> List[Hashable]:
        return list(self.items.keys())

+    def len(self):
+        return len(self.items)
+
+    def __len__(self):
+        return len(self.items)

class ItemPicker(Artifact):
    item: object = None
data.py
CHANGED
@@ -15,7 +15,6 @@ from .collections_operators import __file__ as _
from .dataclass import __file__ as _
from .dataset_utils import __file__ as _
from .dataset_utils import get_dataset_artifact
-from .db_utils import __file__ as _
from .deprecation_utils import __file__ as _
from .dialog_operators import __file__ as _
from .dict_utils import __file__ as _
@@ -58,6 +57,7 @@ from .settings_utils import get_constants
from .span_lableing_operators import __file__ as _
from .split_utils import __file__ as _
from .splitters import __file__ as _
+from .sql_utils import __file__ as _
from .standard import __file__ as _
from .stream import __file__ as _
from .stream_operators import __file__ as _
fusion.py
CHANGED
@@ -34,11 +34,7 @@ class BaseFusion(SourceOperator):
            for i in range(len(self.subsets)):
                self.named_subsets[i] = self.subsets[i]
        else:
-            try:
-                self.named_subsets[name] = origin
-            except Exception as e:
-                raise RuntimeError(f"Exception in subset: {name}") from e
+            self.named_subsets = self.subsets

    def splits(self) -> List[str]:
        self.prepare_subsets()
image_operators.py
CHANGED
@@ -41,7 +41,9 @@ def image_to_data_url(image: Image, default_format="JPEG"):
    https://developer.mozilla.org/en-US/docs/Web/URI/Schemes/data
    """
    image_format = image["format"] if image["format"] else default_format
-    base64_image = _image_to_bytes(
+    base64_image = _image_to_bytes(
+        image["image"].convert("RGB"), format=image_format.upper()
+    )
    return ImageDataString(f"data:image/{image_format.lower()};base64,{base64_image}")
inference.py
CHANGED
@@ -1,6 +1,8 @@
import abc
import asyncio
+import base64
import dataclasses
+import io
import json
import logging
import os
@@ -26,14 +28,14 @@ from typing import (
    Union,
)

-from datasets import Dataset, DatasetDict
+from datasets import Dataset, DatasetDict, Image
from tqdm import tqdm, trange
from tqdm.asyncio import tqdm_asyncio

from .artifact import Artifact
from .dataclass import InternalField, NonPositionalField
from .deprecation_utils import deprecation
-from .error_utils import UnitxtError
+from .error_utils import UnitxtError, UnitxtWarning
from .image_operators import (
    EncodeImageToString,
    ImageDataString,
@@ -277,6 +279,12 @@ class LogProbInferenceEngine(abc.ABC, Artifact):
        """
        pass

+    def _mock_infer_log_probs(
+        self,
+        dataset: Union[List[Dict[str, Any]], Dataset],
+    ) -> Union[List[str], List[TextGenerationInferenceOutput]]:
+        return [mock_logprobs_default_value_factory() for instance in dataset]
+
    def infer_log_probs(
        self,
        dataset: Union[List[Dict[str, Any]], Dataset],
@@ -296,7 +304,12 @@ class LogProbInferenceEngine(abc.ABC, Artifact):
        )

        [self.verify_instance(instance) for instance in dataset]
+
+        if settings.mock_inference_mode:
+            result = self._mock_infer_log_probs(dataset)
+        else:
+            result = self._infer_log_probs(dataset, return_meta_data)
+        return result


class LazyLoadMixin(Artifact):
@@ -811,9 +824,12 @@ class HFPeftInferenceEngine(HFAutoModelInferenceEngine):
            if AutoConfig.from_pretrained(self.model_name).is_encoder_decoder
            else AutoPeftModelForCausalLM
        )
+        path = self.peft_config.base_model_name_or_path
+        if settings.hf_offline_models_path is not None:
+            path = os.path.join(settings.hf_offline_models_path, path)

        self.model = model_class.from_pretrained(
-            pretrained_model_name_or_path=
+            pretrained_model_name_or_path=path,
            trust_remote_code=True,
            device_map=self.device_map,
            low_cpu_mem_usage=self.low_cpu_mem_usage,
@@ -858,10 +874,15 @@ class HFPipelineBasedInferenceEngine(
    def _define_task(self):
        from transformers import AutoConfig

+        path = self.model_name
+        if settings.hf_offline_models_path is not None:
+            path = os.path.join(settings.hf_offline_models_path, path)
+
        self.task = (
            "text2text-generation"
            if AutoConfig.from_pretrained(
+                path,
+                trust_remote_code=True,
            ).is_encoder_decoder
            else "text-generation"
        )
@@ -899,11 +920,15 @@ class HFPipelineBasedInferenceEngine(
    def _create_pipeline(self, model_args: Dict[str, Any]):
        from transformers import pipeline

+        path = self.model_name
+        if settings.hf_offline_models_path is not None:
+            path = os.path.join(settings.hf_offline_models_path, path)
+
        self.model = pipeline(
-            model=
+            model=path,
            task=self.task,
            use_fast=self.use_fast_tokenizer,
-            trust_remote_code=
+            trust_remote_code=settings.allow_unverified_code,
            **model_args,
            **self.to_dict(
                [HFGenerationParamsMixin],
@@ -1143,15 +1168,14 @@ class OllamaInferenceEngine(
        import ollama

        args = self.to_dict([StandardAPIParamsMixin])
-
        results = []
-
+        model = args.pop("model")
        for instance in dataset:
            messages = self.to_messages(instance)
            response = ollama.chat(
-                model=self.model,
                messages=messages,
+                model=model,
+                options=args,
            )
            results.append(response)
@@ -1610,34 +1634,52 @@ class OpenAiInferenceEngine(

    @run_with_imap
    def _get_chat_completion(self, instance, return_meta_data):
+        import openai
        messages = self.to_messages(instance)
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model_name,
+                **self._get_completion_kwargs(),
+            )
+            prediction = response.choices[0].message.content
+            return self.get_return_object(prediction, response, return_meta_data)
+        # catch in case of content_filtering failure
+        except openai.BadRequestError as e:
+            logging.error(f"Error predicting instance {messages}:{e}. Returning empty prediction")
+            return TextGenerationInferenceOutput(prediction = "-", input_tokens=0, output_tokens=0)

    @run_with_imap
    def _get_logprobs(self, instance, return_meta_data):
+        import openai
        messages = self.to_messages(instance)
+        try:
+            response = self.client.chat.completions.create(
+                messages=messages,
+                model=self.model_name,
+                **self._get_completion_kwargs(),
+            )
+            top_logprobs_response = response.choices[0].logprobs.content
+            pred_output = [
+                {
+                    "top_tokens": [
+                        {"text": obj.token, "logprob": obj.logprob}
+                        for obj in generated_token.top_logprobs
+                    ]
+                }
+                for generated_token in top_logprobs_response
+            ]
+            return self.get_return_object(pred_output, response, return_meta_data)
+        # catch in case of content_filtering failure
+        except openai.BadRequestError as e:
+            logging.error(f"Error predicting instance {messages}:{e}. Returning empty prediction")
+            prediction = [{"top_tokens": [
+                {"text": "-", "logprob": 0}
+                ]
+            }]
+            return TextGenerationInferenceOutput(prediction=prediction, input_tokens=0, output_tokens=0)

    def get_return_object(self, predict_result, response, return_meta_data):
        if return_meta_data:
@@ -1887,7 +1929,7 @@ class WMLGenerationParamsMixin(Artifact):

class WMLChatParamsMixin(Artifact):
    frequency_penalty: Optional[float] = None
-    top_logprobs: Optional[int] =
+    top_logprobs: Optional[int] = None
    presence_penalty: Optional[float] = None
    response_format: Optional[Dict[str, Any]] = None
    temperature: Optional[float] = None
@@ -1898,7 +1940,7 @@ class WMLChatParamsMixin(Artifact):


CredentialsWML = Dict[
-    Literal["url", "username", "password", "
+    Literal["url", "username", "password", "api_key", "project_id", "space_id"], str
]

@@ -1958,28 +2000,28 @@ class WMLInferenceEngineBase(
        and not (self.model_name and self.deployment_id)
    ), "Either 'model_name' or 'deployment_id' must be specified, but not both at the same time."

-    def process_data_before_dump(self, data):
+    # def process_data_before_dump(self, data):
+    #     if "credentials" in data:
+    #         for key, value in data["credentials"].items():
+    #             if key != "url":
+    #                 data["credentials"][key] = "<hidden>"
+    #             else:
+    #                 data["credentials"][key] = value
+    #     return data

    def _initialize_wml_client(self):
-        from ibm_watsonx_ai.client import APIClient
+        from ibm_watsonx_ai.client import APIClient, Credentials

-        if self.credentials is None:
+        if self.credentials is None or len(self.credentials) == 0:  # TODO: change
            self.credentials = self._read_wml_credentials_from_env()
        self._verify_wml_credentials(self.credentials)
+        return APIClient(
+            credentials=Credentials(
+                api_key=self.credentials["api_key"],
+                url=self.credentials["url"]
+            ),
+            project_id=self.credentials.get("project_id", None),
+            space_id=self.credentials.get("space_id", None))

    @staticmethod
    def _read_wml_credentials_from_env() -> CredentialsWML:
@@ -2002,6 +2044,8 @@ class WMLInferenceEngineBase(
                    "only one of those defined in the env."
                )
            credentials["space_id"] = space_id
+        elif space_id:
+            credentials["space_id"] = space_id
        elif project_id:
            credentials["project_id"] = project_id
        else:
@@ -2024,7 +2068,7 @@ class WMLInferenceEngineBase(
            )

        if apikey:
-            credentials["
+            credentials["api_key"] = apikey
        elif username and password:
            credentials["username"] = username
            credentials["password"] = password
@@ -2042,7 +2086,7 @@ class WMLInferenceEngineBase(
        assert isoftype(credentials, CredentialsWML), (
            "WML credentials object must be a dictionary which may "
            "contain only the following keys: "
-            "['url', '
+            "['url', 'api_key', 'username', 'password']."
        )

        assert credentials.get(
@@ -2052,10 +2096,10 @@ class WMLInferenceEngineBase(
            "Either 'space_id' or 'project_id' must be provided "
            "as keys for WML credentials dict."
        )
-        assert "
+        assert "api_key" in credentials or (
            "username" in credentials and "password" in credentials
        ), (
-            "Either '
+            "Either 'api_key' or both 'username' and 'password' must be provided "
            "as keys for WML credentials dict."
        )
@@ -2229,7 +2273,8 @@ class WMLInferenceEngineGeneration(WMLInferenceEngineBase, WMLGenerationParamsMixin):
        # currently this is the only configuration that returns generated
        # logprobs and behaves as expected
        logprobs_return_options = {
-            "input_tokens": True,
+            "input_tokens": user_return_options.get("input_tokens", True),
+            "input_text": user_return_options.get("input_text", False),
            "generated_tokens": True,
            "token_logprobs": True,
            "top_n_tokens": user_return_options.get("top_n_tokens", 5),
@@ -2346,7 +2391,9 @@ class WMLInferenceEngineChat(WMLInferenceEngineBase, WMLChatParamsMixin):
        results = wml_inference.infer(dataset["test"])
    """

-    image_encoder: Optional[EncodeImageToString] =
+    image_encoder: Optional[EncodeImageToString] = NonPositionalField(
+        default_factory=EncodeImageToString
+    )

    @staticmethod
    def _extract_queries(instance: Dict[str, Any]) -> Tuple[Optional[str], List]:
@@ -2386,21 +2433,26 @@ class WMLInferenceEngineChat(WMLInferenceEngineBase, WMLChatParamsMixin):
        if image is not None:
            encoded_image = image
            if not isinstance(encoded_image, str):
+                image = Image().decode_example(image)
                if self.image_encoder is None:
                    raise ValueError(
                        "If sending image queries as well, and they are not "
                        "already encoded to base64 strings, you must specify "
                        "the 'image_encoder' to be used."
                    )
+
+                buffer = io.BytesIO()
+                image.save(buffer, format=image.format)
+                image_data_url = ImageDataString(
+                    f"data:image/{image.format.lower()};base64,"
+                    + base64.b64encode(buffer.getvalue()).decode("utf-8")
+                )

            message["content"].append(
                {
                    "type": "image_url",
                    "image_url": {
-                        "url":
-                            "data:image/jpeg;base64," + encoded_image
-                        ),
+                        "url": image_data_url,
                    },
                }
            )
@@ -2550,7 +2602,7 @@ class LMMSEvalBaseInferenceEngine(
    model_type: str
    model_args: Dict[str, str]
    batch_size: int = 1
-    image_token = "<image>"
+    image_token: str = "<image>"

    _requirements_list = {
        "lmms_eval": "Install llms-eval package using 'pip install lmms-eval==0.2.4'",
@@ -2576,6 +2628,7 @@ class LMMSEvalBaseInferenceEngine(
            {
                "batch_size": self.batch_size,
                "device": self.device,
+                "device_map": self.device,
            },
        )
@@ -2714,7 +2767,7 @@ class VLLMParamsMixin(Artifact):
    presence_penalty: float = 0.0
    frequency_penalty: float = 0.0
    repetition_penalty: float = 1.0
-    temperature: float =
+    temperature: float = 0.0
    top_p: float = 1.0
    top_k: int = -1
    min_p: float = 0.0
@@ -2731,13 +2784,21 @@ class VLLMParamsMixin(Artifact):

class VLLMInferenceEngine(InferenceEngine, PackageRequirementsMixin, VLLMParamsMixin):
    def prepare_engine(self):
-        from vllm import LLM, SamplingParams
-
        args = self.to_dict([VLLMParamsMixin])
        args.pop("model")
+        from vllm import LLM, SamplingParams

        self.sampling_params = SamplingParams(**args)
-        self.llm = LLM(
+        self.llm = LLM(
+            model=self.model,
+            device="auto",
+            trust_remote_code=True,
+            max_num_batched_tokens=4096,
+            gpu_memory_utilization=0.7,
+            max_model_len=4096,
+            max_num_seqs=64,
+            enforce_eager=True,
+        )

    def _infer(
        self,
@@ -2749,6 +2810,7 @@ class VLLMInferenceEngine(InferenceEngine, PackageRequirementsMixin, VLLMParamsMixin):
            inputs.append(instance["source"])

        if isinstance(inputs[0], list):
# outputs = self.llm.chat(inputs, self.sampling_params, chat_template="{{- bos_token }}\n{%- if custom_tools is defined %}\n {%- set tools = custom_tools %}\n{%- endif %}\n{%- if not tools_in_user_message is defined %}\n {%- set tools_in_user_message = true %}\n{%- endif %}\n{%- if not date_string is defined %}\n {%- if strftime_now is defined %}\n {%- set date_string = strftime_now(\"%d %b %Y\") %}\n {%- else %}\n {%- set date_string = \"26 Jul 2024\" %}\n {%- endif %}\n{%- endif %}\n{%- if not tools is defined %}\n {%- set tools = none %}\n{%- endif %}\n\n{#- This block extracts the system message, so we can slot it into the right place. #}\n{%- if messages[0]['role'] == 'system' %}\n {%- set system_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- set user_supplied_system_message = true %}\n{%- else %}\n {%- set system_message = \"\" %}\n {%- set user_supplied_system_message = false %}\n{%- endif %}\n\n{#- Find out if there are any images #}\n{% set image_ns = namespace(has_images=false) %} \n{%- for message in messages %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {%- set image_ns.has_images = true %}\n {%- endif %}\n {%- endfor %}\n{%- endfor %}\n\n{#- System message if there are no images, or if the user supplied one #}\n{%- if user_supplied_system_message or not image_ns.has_images %}\n {{- \"<|start_header_id|>system<|end_header_id|>\\n\" }}\n {%- if tools is not none %}\n {{- \"Environment: ipython\\n\" }}\n {%- endif %}\n {{- \"Cutting Knowledge Date: December 2023\\n\" }}\n {{- \"Today Date: \" + date_string + \"\\n\" }}\n {%- if tools is not none and not tools_in_user_message %}\n {{- \"You have access to the following functions. To call a function, please respond with JSON for a function call.\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' }}\n {{- \"Do not use variables.\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\" }}\n {%- endfor %}\n {%- endif %}\n {{- system_message }}\n {{- \"<|eot_id|>\" }}\n{%- endif %}\n\n{#- Custom tools are passed in a user message with some extra guidance #}\n{%- if tools_in_user_message and not tools is none %}\n {#- Extract the first user message so we can plug it in here #}\n {%- if messages | length != 0 %}\n {%- set first_user_message = messages[0]['content']|trim %}\n {%- set messages = messages[1:] %}\n {%- else %}\n {{- raise_exception(\"Cannot put tools in the first user message when there's no first user message!\") }}\n{%- endif %}\n {{- '<|start_header_id|>user<|end_header_id|>\\n' -}}\n {{- \"Given the following functions, please respond with a JSON for a function call \" }}\n {{- \"with its proper arguments that best answers the given prompt.\\n\" }}\n {{- 'Respond in the format {\"name\": function name, \"parameters\": dictionary of argument name and its value}.' 
}}\n {{- \"Do not use variables.\\n\" }}\n {%- for t in tools %}\n {{- t | tojson(indent=4) }}\n {{- \"\\n\" }}\n {%- endfor %}\n {{- first_user_message + \"<|eot_id|>\"}}\n{%- endif %}\n\n{%- for message in messages %}\n {%- if not (message.role == 'ipython' or message.role == 'tool' or 'tool_calls' in message) %}\n {{- '<|start_header_id|>' + message['role'] + '<|end_header_id|>\\n' }}\n {%- if message['content'] is string %}\n {{- message['content'] }}\n {%- else %}\n {%- for content in message['content'] %}\n {%- if content['type'] == 'image' %}\n {{- '<|image|>' }}\n {%- elif content['type'] == 'text' %}\n {{- content['text'] }}\n {%- endif %}\n {%- endfor %}\n {%- endif %}\n {{- '<|eot_id|>' }}\n {%- elif 'tool_calls' in message %}\n {%- if not message.tool_calls|length == 1 %}\n {{- raise_exception(\"This model only supports single tool-calls at once!\") }}\n {%- endif %}\n {%- set tool_call = message.tool_calls[0].function %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n' -}}\n {{- '{\"name\": \"' + tool_call.name + '\", ' }}\n {{- '\"parameters\": ' }}\n {{- tool_call.arguments | tojson }}\n {{- \"}\" }}\n {{- \"<|eot_id|>\" }}\n {%- elif message.role == \"tool\" or message.role == \"ipython\" %}\n {{- \"<|start_header_id|>ipython<|end_header_id|>\\n\" }}\n {%- if message.content is mapping or message.content is iterable %}\n {{- message.content | tojson }}\n {%- else %}\n {{- message.content }}\n {%- endif %}\n {{- \"<|eot_id|>\" }}\n {%- endif %}\n{%- endfor %}\n{%- if add_generation_prompt %}\n {{- '<|start_header_id|>assistant<|end_header_id|>\\n' }}\n{%- endif %}")
            outputs = self.llm.chat(inputs, self.sampling_params)
        else:
            outputs = self.llm.generate(inputs, self.sampling_params)
@@ -2862,12 +2924,12 @@ class LiteLLMInferenceEngine(
        self, dataset: List[Dict[str, Any]]
    ) -> List[TextGenerationInferenceOutput]:
        """Process multiple inference requests concurrently with a progress bar."""
-        tasks =
+        tasks = (
            self._infer_instance(i, instance) for i, instance in enumerate(dataset)
+        )
        # Use tqdm_asyncio.gather to display progress bar
        return await tqdm_asyncio.gather(
-            *tasks, desc=f"LiteLLM Inference ({self.model})", total=len(
+            *tasks, desc=f"LiteLLM Inference ({self.model})", total=len(dataset)
        )

    def _infer(
@@ -2923,10 +2985,12 @@ class CrossProviderInferenceEngine(InferenceEngine, StandardAPIParamsMixin):
        mapping each supported API to a corresponding
        model identifier string. This mapping allows consistent access to models
        across different API backends.
+    provider_specific_args: (Optional[Dict[str, Dict[str,str]]]) Args specific to a provider for example provider_specific_args={"watsonx": {"max_requests_per_second": 4}}
    """

    label: str = "cross_provider"
    provider: Optional[_supported_apis] = None
+    provider_specific_args: Optional[Dict[str, Dict[str,str]]] = None

    provider_model_map: Dict[_supported_apis, Dict[str, str]] = {
        "watsonx": {
@@ -2939,8 +3003,10 @@ class CrossProviderInferenceEngine(InferenceEngine, StandardAPIParamsMixin):
            "llama-3-2-1b-instruct": "watsonx/meta-llama/llama-3-2-1b-instruct",
            "llama-3-2-11b-vision-instruct": "watsonx/meta-llama/llama-3-2-11b-vision-instruct",
            "llama-3-2-90b-vision-instruct": "watsonx/meta-llama/llama-3-2-90b-vision-instruct",
+            "mistral-large-instruct": "watsonx/mistralai/mistral-large",
        },
        "watsonx-sdk": {
+            "llama-3-2-11b-vision-instruct": "meta-llama/llama-3-2-11b-vision-instruct",
            "llama-3-8b-instruct": "meta-llama/llama-3-8b-instruct",
            "llama-3-70b-instruct": "meta-llama/llama-3-70b-instruct",
            "granite-3-8b-instruct": "ibm/granite-3-8b-instruct",
@@ -3084,12 +3150,18 @@ class CrossProviderInferenceEngine(InferenceEngine, StandardAPIParamsMixin):
                f"{provider} is not a configured API for CrossProviderInferenceEngine. Supported apis: {','.join(self.provider_model_map.keys())}"
            )
        if self.model not in self.provider_model_map[provider]:
-                f"{self.model} is not configured for provider {provider}. Supported models: {','.join(self.provider_model_map[provider].keys())}"
+            UnitxtWarning(
+                f"{self.model} is not configured for provider {provider}. Supported models: {','.join(self.provider_model_map[provider].keys())}. Using un normalized name will make it impossible to switch to different provider on request."
            )
        cls = self.__class__._provider_to_base_class[provider]
        args = self.to_dict([StandardAPIParamsMixin])
-        args["model"] = self.provider_model_map[provider]
+        args["model"] = self.provider_model_map[provider].get(self.model, self.model)
+
+        if self.provider_specific_args is not None:
+            provider_args = self.provider_specific_args.get(provider)
+            if provider_args is not None:
+                args.update(provider_args)
+
        params = list(args.keys())
        if provider in self._provider_param_renaming:
            for param in params:
@@ -3135,8 +3207,16 @@ class HFOptionSelectingInferenceEngine(InferenceEngine, TorchDeviceMixin):
        self.device = self.get_device()

        # Load model and tokenizer
+        path = self.model_name
+        if settings.hf_offline_models_path is not None:
+            path = os.path.join(settings.hf_offline_models_path, path)
+
+        self.tokenizer = AutoTokenizer.from_pretrained(
+            path,
+        )
+        self.model = AutoModelForCausalLM.from_pretrained(
+            path,
+        ).to(
            self.device
        )
        # Set pad_token if it doesn't exist
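A small usage sketch for the new `provider_specific_args` field on CrossProviderInferenceEngine, reusing the example value from the added docstring line; the model and provider names here are illustrative only.

    # Sketch only: per-provider extras are merged into that provider's engine args.
    engine = CrossProviderInferenceEngine(
        model="llama-3-8b-instruct",
        provider="watsonx",
        provider_specific_args={"watsonx": {"max_requests_per_second": 4}},
    )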
llm_as_judge_chat_templates.py
CHANGED
@@ -8,14 +8,19 @@ The context includes information relevant to the nature or generation of the response.
You will assess the quality of the response subject to an evaluation criteria.
###Context:
{context_variables}
+
###Response:
{response}
+
###Evaluation criteria:
{criteria_description}
{display_options_instruction}
+
Briefly assess the quality of the response subject to the evaluation criteria.
Focus on the evaluation criteria during assessment, do not provide a general assessment.
-Assessment:
+Assessment:
+
+Lets think step by step """
    ),
    "summarization": InputOutputTemplate(
        input_format="""Transform the following assessment into a concise summary that focuses on the key details, excluding references to the assessment itself.
@@ -41,9 +46,11 @@ You will choose the better quality response subject to the evaluation criteria.

This is the context:
{context_variables}
+
This is the evaluation criteria:
{criteria_name}
{criteria_description}
+
Response {option_a}:
{response_a}
Response {option_b}:
@@ -51,7 +58,9 @@ Response {option_b}:

Keeping the evaluation criteria in mind, briefly assess which response is better.
Focus on the evaluation criteria during assessment, do not provide a general assessment.
-Assessment:
+Assessment:
+
+Lets think step by step """
    ),
    "summarization": InputOutputTemplate(
        input_format="""Transform the following assessment into a concise summary that focuses on the key details, excluding references to the assessment itself. The summary must clearly state which response won.
llm_as_judge_constants.py
CHANGED
@@ -84,8 +84,6 @@ class EvaluatorNameEnum(str, Enum):
    GRANITE3_8B = "Granite3.0-8b"
    GRANITE3_1_2B = "Granite3.1-2b"
    GRANITE3_1_8B = "Granite3.1-8b"
-    GRANITE_GUARDIAN_2B = "Granite Guardian 3.0 2B"
-    GRANITE_GUARDIAN_8B = "Granite Guardian 3.0 8B"


class ModelProviderEnum(str, Enum):
@@ -112,8 +110,6 @@ EVALUATOR_TO_MODEL_ID = {
    EvaluatorNameEnum.GRANITE3_8B: "ibm/granite-3-8b-instruct",
    EvaluatorNameEnum.GRANITE3_1_2B: "ibm/granite-3.1-2b-instruct",
    EvaluatorNameEnum.GRANITE3_1_8B: "ibm/granite-3.1-8b-instruct",
-    EvaluatorNameEnum.GRANITE_GUARDIAN_2B: "ibm/granite-guardian-3-2b",
-    EvaluatorNameEnum.GRANITE_GUARDIAN_8B: "ibm/granite-guardian-3-8b",
}

MODEL_RENAMINGS = {
@@ -189,14 +185,6 @@ EVALUATORS_METADATA = [
        EvaluatorNameEnum.LLAMA3_1_405B,
        [ModelProviderEnum.WATSONX, ModelProviderEnum.RITS],
    ),
-    EvaluatorMetadata(
-        EvaluatorNameEnum.GRANITE_GUARDIAN_2B,
-        [ModelProviderEnum.WATSONX],
-    ),
-    EvaluatorMetadata(
-        EvaluatorNameEnum.GRANITE_GUARDIAN_8B,
-        [ModelProviderEnum.WATSONX],
-    ),
]

################################ Direct Assessment Criterias ################################
@@ -946,6 +934,30 @@ class DirectCriteriaCatalogEnum(Enum):
        },
    )

+    CORRECTNESS_BASED_ON_GROUND_TRUTH = CriteriaWithOptions(
+        name="correctness_based_on_ground_truth",
+        description="Does the response correctly convey the same factual information as the ground truth?",
+        options=[
+            CriteriaOption(
+                name="correct",
+                description="The response conveys the same factual meaning as the ground truth. Minor rewording, synonyms, or grammatical differences are acceptable. The response is relevant to the question and does not introduce unrelated or misleading information.",
+            ),
+            CriteriaOption(
+                name="partially_correct",
+                description="The response contains some correct information but is incomplete or lacks essential details. It may also contain minor inaccuracies or extraneous information that slightly misrepresents the ground truth.",
+            ),
+            CriteriaOption(
+                name="incorrect",
+                description="The response does not align with the ground truth. It either presents incorrect, unrelated, or misleading information, or omits key details that change the intended meaning.",
+            ),
+        ],
+        option_map={
+            "correct": 1.0,
+            "partially_correct": 0.5,
+            "incorrect": 0.0,
+        },
+    )
+

DIRECT_CRITERIA = [c.value for c in DirectCriteriaCatalogEnum]
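A brief sketch, assuming the new enum entry is accessed like the existing catalog members, of how the added criteria's option_map turns a judged option name into a score.

    # Sketch only: DIRECT_CRITERIA already collects the .value of every DirectCriteriaCatalogEnum member.
    criteria = DirectCriteriaCatalogEnum.CORRECTNESS_BASED_ON_GROUND_TRUTH.value
    score = criteria.option_map["partially_correct"]  # 0.5, per the option_map above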
llm_as_judge_from_template.py
CHANGED
@@ -459,12 +459,12 @@ class TaskBasedLLMasJudge(LLMAsJudgeBase):
                judge_task_input_field, judge_task_input_field
            )
            new_val = input_instance.get(orig_task_field_name)
-            if
+            if new_val is None and isinstance(prediction, dict):
                new_val = prediction.get(orig_task_field_name)
-            if new_val:
+            if new_val is not None:
                instance_task_data[judge_task_input_field] = new_val

-        if self.prediction_field and prediction:
+        if self.prediction_field and prediction is not None:
            if isinstance(prediction, dict):
                prediction = prediction[self.prediction_field]
            instance_task_data[self.prediction_field] = prediction
loaders.py
CHANGED
@@ -36,12 +36,14 @@ import itertools
|
|
36 |
import json
|
37 |
import os
|
38 |
import tempfile
|
|
|
39 |
from abc import abstractmethod
|
40 |
from pathlib import Path
|
41 |
from tempfile import TemporaryDirectory
|
42 |
from typing import (
|
43 |
Any,
|
44 |
Dict,
|
|
|
45 |
Iterable,
|
46 |
List,
|
47 |
Literal,
|
@@ -53,24 +55,44 @@ from typing import (
|
|
53 |
|
54 |
import pandas as pd
|
55 |
import requests
|
56 |
-
from datasets import
|
57 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
58 |
from huggingface_hub import HfApi
|
59 |
from tqdm import tqdm
|
60 |
|
61 |
-
from .dataclass import
|
|
|
62 |
from .fusion import FixedFusion
|
63 |
from .logging_utils import get_logger
|
64 |
from .operator import SourceOperator
|
65 |
from .operators import Set
|
66 |
from .settings_utils import get_settings
|
67 |
-
from .stream import MultiStream
|
68 |
from .type_utils import isoftype
|
69 |
-
from .utils import LRUCache
|
70 |
|
71 |
logger = get_logger()
|
72 |
settings = get_settings()
|
73 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
74 |
|
75 |
class Loader(SourceOperator):
|
76 |
"""A base class for all loaders.
|
@@ -114,9 +136,11 @@ class Loader(SourceOperator):
|
|
114 |
return f"{self.__class__.__name__}.loader_limit"
|
115 |
|
116 |
def log_limited_loading(self):
|
117 |
-
|
118 |
-
|
119 |
-
|
|
|
|
|
120 |
|
121 |
def add_data_classification(self, multi_stream: MultiStream) -> MultiStream:
|
122 |
if self.data_classification_policy is None:
|
@@ -156,19 +180,46 @@ class Loader(SourceOperator):
|
|
156 |
pass
|
157 |
|
158 |
def load_data(self) -> MultiStream:
|
159 |
-
|
160 |
-
if iterables is None:
|
161 |
iterables = self.load_iterables()
|
162 |
-
|
163 |
-
|
|
|
|
|
164 |
return MultiStream.from_iterables(iterables, copying=True)
|
165 |
|
166 |
def process(self) -> MultiStream:
|
167 |
self._maybe_set_classification_policy()
|
168 |
return self.add_data_classification(self.load_data())
|
169 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
170 |
|
171 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
172 |
"""Loads datasets from the HuggingFace Hub.
|
173 |
|
174 |
It supports loading with or without streaming,
|
@@ -213,13 +264,7 @@ class LoadHF(Loader):
|
|
213 |
streaming: bool = None
|
214 |
filtering_lambda: Optional[str] = None
|
215 |
num_proc: Optional[int] = None
|
216 |
-
|
217 |
-
|
218 |
-
def verify(self):
|
219 |
-
for requirement in self.requirements_list:
|
220 |
-
if requirement not in self._requirements_list:
|
221 |
-
self._requirements_list.append(requirement)
|
222 |
-
super().verify()
|
223 |
|
224 |
def filter_load(self, dataset: DatasetDict):
|
225 |
if not settings.allow_unverified_code:
|
@@ -234,12 +279,15 @@ class LoadHF(Loader):
|
|
234 |
return settings.stream_hf_datasets_by_default
|
235 |
return self.streaming
|
236 |
|
237 |
-
|
238 |
-
|
239 |
-
|
240 |
-
|
241 |
-
|
242 |
-
|
|
|
|
|
|
|
243 |
try:
|
244 |
dataset = hf_load_dataset(
|
245 |
self.path,
|
@@ -247,44 +295,8 @@ class LoadHF(Loader):
|
|
247 |
data_dir=self.data_dir,
|
248 |
data_files=self.data_files,
|
249 |
revision=self.revision,
|
250 |
-
streaming=
|
251 |
-
|
252 |
-
split=self.split,
|
253 |
-
trust_remote_code=settings.allow_unverified_code,
|
254 |
-
num_proc=self.num_proc,
|
255 |
-
)
|
256 |
-
except ValueError as e:
|
257 |
-
if "trust_remote_code" in str(e):
|
258 |
-
raise ValueError(
|
259 |
-
f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
|
260 |
-
) from e
|
261 |
-
raise e
|
262 |
-
|
263 |
-
if self.split is not None:
|
264 |
-
dataset = {self.split: dataset}
|
265 |
-
|
266 |
-
if self.filtering_lambda is not None:
|
267 |
-
dataset = self.filter_load(dataset)
|
268 |
-
|
269 |
-
return dataset
|
270 |
-
|
271 |
-
def load_dataset(self):
|
272 |
-
with tempfile.TemporaryDirectory() as dir_to_be_deleted:
|
273 |
-
if settings.disable_hf_datasets_cache:
|
274 |
-
cache_dir = dir_to_be_deleted
|
275 |
-
else:
|
276 |
-
cache_dir = None
|
277 |
-
try:
|
278 |
-
dataset = hf_load_dataset(
|
279 |
-
self.path,
|
280 |
-
name=self.name,
|
281 |
-
data_dir=self.data_dir,
|
282 |
-
data_files=self.data_files,
|
283 |
-
streaming=False,
|
284 |
-
keep_in_memory=True,
|
285 |
-
cache_dir=cache_dir,
|
286 |
-
split=self.split,
|
287 |
-
trust_remote_code=settings.allow_unverified_code,
|
288 |
num_proc=self.num_proc,
|
289 |
)
|
290 |
except ValueError as e:
|
@@ -292,11 +304,10 @@ class LoadHF(Loader):
|
|
292 |
raise ValueError(
|
293 |
f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
|
294 |
) from e
|
295 |
-
|
296 |
-
|
297 |
-
|
298 |
-
|
299 |
-
return dataset
|
300 |
|
301 |
def _maybe_set_classification_policy(self):
|
302 |
if os.path.exists(self.path):
|
@@ -309,34 +320,57 @@ class LoadHF(Loader):
|
|
309 |
None, # No warning when loading from public hub
|
310 |
)
|
311 |
|
312 |
-
def
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
313 |
try:
|
314 |
-
dataset = self.
|
315 |
except (
|
316 |
NotImplementedError
|
317 |
): # streaming is not supported for zipped files so we load without streaming
|
318 |
-
dataset = self.load_dataset()
|
319 |
|
320 |
if self.filtering_lambda is not None:
|
321 |
dataset = self.filter_load(dataset)
|
322 |
|
323 |
limit = self.get_limit()
|
324 |
-
if limit is
|
325 |
-
|
326 |
-
|
327 |
-
for
|
328 |
-
|
329 |
-
|
330 |
-
|
331 |
-
split_limit = limit
|
332 |
-
result[split_name] = dataset[split_name].take(split_limit)
|
333 |
-
|
334 |
-
return result
|
335 |
-
|
336 |
-
return dataset
|
337 |
|
338 |
|
339 |
-
class LoadCSV(
|
340 |
"""Loads data from CSV files.
|
341 |
|
342 |
Supports streaming and can handle large files by loading them in chunks.
|
@@ -381,6 +415,7 @@ class LoadCSV(Loader):
|
|
381 |
args = {}
|
382 |
if self.file_type == "csv":
|
383 |
args["sep"] = self.sep
|
|
|
384 |
if self.compression is not None:
|
385 |
args["compression"] = self.compression
|
386 |
if self.lines is not None:
|
@@ -389,19 +424,44 @@ class LoadCSV(Loader):
|
|
389 |
args["nrows"] = self.get_limit()
|
390 |
return args
|
391 |
|
392 |
-
def
|
393 |
-
|
394 |
-
|
395 |
-
|
|
|
|
|
|
|
396 |
if self.get_limit() is not None:
|
397 |
self.log_limited_loading()
|
398 |
-
|
399 |
-
|
400 |
-
|
401 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
402 |
|
403 |
|
404 |
-
class LoadFromSklearn(
|
405 |
"""Loads datasets from the sklearn library.
|
406 |
|
407 |
This loader does not support streaming and is intended for use with sklearn's dataset fetch functions.
|
@@ -437,15 +497,22 @@ class LoadFromSklearn(Loader):
|
|
437 |
|
438 |
self.downloader = getattr(sklearn_datatasets, f"fetch_{self.dataset_name}")
|
439 |
|
440 |
-
def
|
441 |
-
|
442 |
-
|
443 |
-
|
444 |
-
|
445 |
-
|
446 |
-
|
447 |
-
|
448 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
449 |
|
450 |
|
451 |
class MissingKaggleCredentialsError(ValueError):
|
@@ -683,7 +750,7 @@ class LoadFromIBMCloud(Loader):
|
|
683 |
return dataset
|
684 |
|
685 |
|
686 |
-
class MultipleSourceLoader(
|
687 |
"""Allows loading data from multiple sources, potentially mixing different types of loaders.
|
688 |
|
689 |
Args:
|
@@ -707,20 +774,23 @@ class MultipleSourceLoader(Loader):
|
|
707 |
|
708 |
sources: List[Loader]
|
709 |
|
710 |
-
# MultipleSourceLoaders uses the the data classification from source loaders,
|
711 |
-
# so only need to add it, if explicitly requested to override.
|
712 |
def add_data_classification(self, multi_stream: MultiStream) -> MultiStream:
|
713 |
if self.data_classification_policy is None:
|
714 |
return multi_stream
|
715 |
return super().add_data_classification(multi_stream)
|
716 |
|
717 |
-
def
|
718 |
-
|
|
|
|
|
|
|
719 |
|
720 |
-
def
|
721 |
-
|
722 |
-
subsets=self.sources,
|
723 |
-
|
|
|
|
|
724 |
|
725 |
|
726 |
class LoadFromDictionary(Loader):
|
@@ -775,11 +845,8 @@ class LoadFromDictionary(Loader):
 775         return self.data
 776
 777
 778 - class LoadFromHFSpace( … [truncated]
 779 -   """Used to load data from HuggingFace Spaces.
 780 -
 781 -   Loaders firstly tries to download all files specified in the 'data_files' parameter
 782 -   from the given space and then reads them as a HuggingFace Dataset.
 783
 784     Args:
 785         space_name (str):
@@ -800,22 +867,6 @@ class LoadFromHFSpace(LoadHF):
 800         token_env (str, optional):
 801             Key of an env variable which value will be used for
 802             authentication when accessing the HuggingFace Space - if necessary.
 803 -
 804 -   Example:
 805 -       Loading from a HuggingFace Space
 806 -
 807 -       .. code-block:: python
 808 -
 809 -           loader = LoadFromHFSpace(
 810 -               space_name="lmsys/mt-bench",
 811 -               data_files={
 812 -                   "train": [
 813 -                       "data/mt_bench/model_answer/gpt-3.5-turbo.jsonl",
 814 -                       "data/mt_bench/model_answer/gpt-4.jsonl",
 815 -                   ],
 816 -                   "test": "data/mt_bench/model_answer/tulu-30b.jsonl",
 817 -               },
 818 -           )
 819     """
 820
 821     space_name: str
@@ -840,123 +891,78 @@ class LoadFromHFSpace(LoadHF):
 840             return token
 841         return self.use_token
 842
 843-846 -   [removed download-helper header lost in this render]
 847 -       token = self._get_token()
 848
 849-854 -   [removed lines lost in this render]
 855 -               revision=self.revision,
 856 -               local_dir=self.path,
 857             )
 858 -
 859 -           raise ValueError(
 860 -               f"The file '{filename}' was not found in the space '{self.space_name}'. "
 861 -               f"Please check if the filename is correct, or if it exists in that "
 862 -               f"Huggingface space."
 863 -           ) from e
 864 -       except RepositoryNotFoundError as e:
 865 -           raise ValueError(
 866 -               f"The Huggingface space '{self.space_name}' was not found. "
 867 -               f"Please check if the name is correct and you have access to the space."
 868 -           ) from e
 869
 870-871 -   [removed lines lost in this render]
 872 -   def _download_data(self) -> str:
 873 -       if isinstance(self.data_files, str):
 874 -           data_files = [self.data_files]
 875 -       elif isinstance(self.data_files, Mapping):
 876 -           data_files = list(self.data_files.values())
 877 -       else:
 878 -           data_files = self.data_files
 879
 880-887 -   [removed lines lost in this render]
 888 -           ]
 889 -           dir_paths_list.extend(dir_paths)
 890
 891-894 -   [removed lines lost in this render]
 895
 896-898 -   [removed lines lost in this render]
 899 -       return any(char in path for char in wildcard_characters)
 900
 901-910 -   [removed lines lost in this render]
 911 -           self.space_name, repo_type="space", revision=self.revision
 912 -       )
 913 -       if isinstance(self.data_files, str):
 914 -           self.data_files = self._get_file_list_from_wildcard_path(
 915 -               self.data_files, repo_files
 916 -           )
 917 -       elif isinstance(self.data_files, Mapping):
 918 -           new_mapping = {}
 919 -           for k, v in self.data_files.items():
 920 -               if isinstance(v, list):
 921 -                   assert all(isinstance(s, str) for s in v)
 922 -                   new_mapping[k] = [
 923 -                       file
 924 -                       for p in v
 925 -                       for file in self._get_file_list_from_wildcard_path(
 926 -                           p, repo_files
 927 -                       )
 928 -                   ]
 929 -               elif isinstance(v, str):
 930 -                   new_mapping[k] = self._get_file_list_from_wildcard_path(
 931 -                       v, repo_files
 932 -                   )
 933 -               else:
 934 -                   raise NotImplementedError(
 935 -                       f"Loader does not support input 'data_files' of type Mapping[{type(v)}]"
 936                     )
 937
 938-944 -   [removed lines lost in this render]
 945 -           ]
 946 -       else:
 947 -           raise NotImplementedError(
 948 -               f"Loader does not support input 'data_files' of type {type(self.data_files)}"
 949 -           )
 950 -
 951 -   def _maybe_set_classification_policy(self):
 952 -       self.set_default_data_classification(
 953 -           ["public"], "when loading from Huggingface spaces"
 954 -       )
 955
 956 -   def load_data(self):
 957 -       self._map_wildcard_path_to_full_paths()
 958 -       self.path = self._download_data()
 959 -       return super().load_data()
 960
 961
 962 class LoadFromAPI(Loader):
@@ -985,6 +991,9 @@ class LoadFromAPI(Loader):
 985             Defaults to "data".
 986         method (str, optional):
 987             The HTTP method to use for API requests. Defaults to "GET".
 988     """
 989
 990     urls: Dict[str, str]
@@ -995,6 +1004,7 @@ class LoadFromAPI(Loader):
 995     headers: Optional[Dict[str, Any]] = None
 996     data_field: str = "data"
 997     method: str = "GET"
 998
 999     # class level shared cache:
 1000    _loader_cache = LRUCache(max_size=settings.loader_cache_size)
@@ -1026,13 +1036,13 @@ class LoadFromAPI(Loader):
 1026            response = requests.get(
 1027                url,
 1028                headers=base_headers,
 1029 -              verify= … [truncated]
 1030            )
 1031        elif self.method == "POST":
 1032            response = requests.post(
 1033                url,
 1034                headers=base_headers,
 1035 -              verify= … [truncated]
 1036                json={},
 1037            )
 1038        else:
|
|
36 |
import json
|
37 |
import os
|
38 |
import tempfile
|
39 |
+
import time
|
40 |
from abc import abstractmethod
|
41 |
from pathlib import Path
|
42 |
from tempfile import TemporaryDirectory
|
43 |
from typing import (
|
44 |
Any,
|
45 |
Dict,
|
46 |
+
Generator,
|
47 |
Iterable,
|
48 |
List,
|
49 |
Literal,
|
|
|
55 |
|
56 |
import pandas as pd
|
57 |
import requests
|
58 |
+
from datasets import (
|
59 |
+
DatasetDict,
|
60 |
+
DownloadConfig,
|
61 |
+
IterableDataset,
|
62 |
+
IterableDatasetDict,
|
63 |
+
get_dataset_split_names,
|
64 |
+
)
|
65 |
+
from datasets import load_dataset as _hf_load_dataset
|
66 |
from huggingface_hub import HfApi
|
67 |
from tqdm import tqdm
|
68 |
|
69 |
+
from .dataclass import NonPositionalField
|
70 |
+
from .error_utils import UnitxtError, UnitxtWarning
|
71 |
from .fusion import FixedFusion
|
72 |
from .logging_utils import get_logger
|
73 |
from .operator import SourceOperator
|
74 |
from .operators import Set
|
75 |
from .settings_utils import get_settings
|
76 |
+
from .stream import DynamicStream, MultiStream
|
77 |
from .type_utils import isoftype
|
78 |
+
from .utils import LRUCache, recursive_copy
|
79 |
|
80 |
logger = get_logger()
|
81 |
settings = get_settings()
|
82 |
|
83 |
+
def hf_load_dataset(path: str, *args, **kwargs):
|
84 |
+
if settings.hf_offline_datasets_path is not None:
|
85 |
+
path = os.path.join(settings.hf_offline_datasets_path, path)
|
86 |
+
return _hf_load_dataset(
|
87 |
+
path,
|
88 |
+
*args, **kwargs,
|
89 |
+
download_config=DownloadConfig(
|
90 |
+
max_retries=settings.loaders_max_retries,
|
91 |
+
),
|
92 |
+
verification_mode="no_checks",
|
93 |
+
trust_remote_code=settings.allow_unverified_code,
|
94 |
+
download_mode= "force_redownload" if settings.disable_hf_datasets_cache else "reuse_dataset_if_exists"
|
95 |
+
)
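For readers unfamiliar with the datasets options used by the wrapper above, here is a minimal, hedged sketch of calling load_dataset with the same retry and cache controls directly; the dataset name is only an example and is not part of this commit.

# Hedged sketch: the same DownloadConfig/download_mode knobs used by the
# wrapper above, applied directly to datasets.load_dataset.
from datasets import DownloadConfig, load_dataset

ds = load_dataset(
    "squad",                                        # example dataset name
    split="train",
    download_config=DownloadConfig(max_retries=3),  # retry flaky downloads
    verification_mode="no_checks",                  # skip checksum verification
    download_mode="reuse_dataset_if_exists",        # or "force_redownload"
)
print(ds[0]["question"])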
|
96 |
|
97 |
class Loader(SourceOperator):
|
98 |
"""A base class for all loaders.
|
|
|
136 |
return f"{self.__class__.__name__}.loader_limit"
|
137 |
|
138 |
def log_limited_loading(self):
|
139 |
+
if not hasattr(self, "_already_logged_limited_loading") or not self._already_logged_limited_loading:
|
140 |
+
self._already_logged_limited_loading = True
|
141 |
+
logger.info(
|
142 |
+
f"\nLoading limited to {self.get_limit()} instances by setting {self.get_limiter()};"
|
143 |
+
)
|
144 |
|
145 |
def add_data_classification(self, multi_stream: MultiStream) -> MultiStream:
|
146 |
if self.data_classification_policy is None:
|
|
|
180 |
pass
|
181 |
|
182 |
def load_data(self) -> MultiStream:
|
183 |
+
try:
|
|
|
184 |
iterables = self.load_iterables()
|
185 |
+
except Exception as e:
|
186 |
+
raise UnitxtError(f"Error in loader:\n{self}") from e
|
187 |
+
if isoftype(iterables, MultiStream):
|
188 |
+
return iterables
|
189 |
return MultiStream.from_iterables(iterables, copying=True)
|
190 |
|
191 |
def process(self) -> MultiStream:
|
192 |
self._maybe_set_classification_policy()
|
193 |
return self.add_data_classification(self.load_data())
|
194 |
|
195 |
+
def get_splits(self):
|
196 |
+
return list(self().keys())
|
197 |
+
|
198 |
+
|
199 |
+
class LazyLoader(Loader):
|
200 |
+
split: Optional[str] = NonPositionalField(default=None)
|
201 |
+
|
202 |
+
@abstractmethod
|
203 |
+
def get_splits(self) -> List[str]:
|
204 |
+
pass
|
205 |
|
206 |
+
@abstractmethod
|
207 |
+
def split_generator(self, split: str) -> Generator:
|
208 |
+
pass
|
209 |
+
|
210 |
+
def load_iterables(self) -> Union[Dict[str, DynamicStream], IterableDatasetDict]:
|
211 |
+
if self.split is not None:
|
212 |
+
splits = [self.split]
|
213 |
+
else:
|
214 |
+
splits = self.get_splits()
|
215 |
+
|
216 |
+
return MultiStream({
|
217 |
+
split: DynamicStream(self.split_generator, gen_kwargs={"split": split})
|
218 |
+
for split in splits
|
219 |
+
})
|
220 |
+
|
221 |
+
|
222 |
+
class LoadHF(LazyLoader):
|
223 |
"""Loads datasets from the HuggingFace Hub.
|
224 |
|
225 |
It supports loading with or without streaming,
|
|
|
264 |
streaming: bool = None
|
265 |
filtering_lambda: Optional[str] = None
|
266 |
num_proc: Optional[int] = None
|
267 |
+
splits: Optional[List[str]] = None
|
|
|
|
|
|
|
|
|
|
|
|
|
268 |
|
269 |
def filter_load(self, dataset: DatasetDict):
|
270 |
if not settings.allow_unverified_code:
|
|
|
279 |
return settings.stream_hf_datasets_by_default
|
280 |
return self.streaming
|
281 |
|
282 |
+
# returns Dict when split names are not known in advance, and just the single split dataset - if known
|
283 |
+
def load_dataset(
|
284 |
+
self, split: str, streaming=None, disable_memory_caching=False
|
285 |
+
) -> Union[IterableDatasetDict, IterableDataset]:
|
286 |
+
dataset_id = str(self) + "_" + str(split)
|
287 |
+
dataset = self.__class__._loader_cache.get(dataset_id, None)
|
288 |
+
if dataset is None:
|
289 |
+
if streaming is None:
|
290 |
+
streaming = self.is_streaming()
|
291 |
try:
|
292 |
dataset = hf_load_dataset(
|
293 |
self.path,
|
|
|
295 |
data_dir=self.data_dir,
|
296 |
data_files=self.data_files,
|
297 |
revision=self.revision,
|
298 |
+
streaming=streaming,
|
299 |
+
split=split,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
300 |
num_proc=self.num_proc,
|
301 |
)
|
302 |
except ValueError as e:
|
|
|
304 |
raise ValueError(
|
305 |
f"{self.__class__.__name__} cannot run remote code from huggingface without setting unitxt.settings.allow_unverified_code=True or by setting environment variable: UNITXT_ALLOW_UNVERIFIED_CODE."
|
306 |
) from e
|
307 |
+
self.__class__._loader_cache.max_size = settings.loader_cache_size
|
308 |
+
if not disable_memory_caching:
|
309 |
+
self.__class__._loader_cache[dataset_id] = dataset
|
310 |
+
return self.__class__._loader_cache[dataset_id]
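The class-level _loader_cache above keeps already-built datasets keyed by loader and split. The LRUCache used here comes from unitxt's utils; the following stand-in is a hypothetical sketch of the same eviction pattern, not the actual implementation.

# Hypothetical stand-in for the LRU cache pattern used above: recently used
# entries stay, the oldest entry is evicted once max_size is exceeded.
from collections import OrderedDict

class SimpleLRUCache:
    def __init__(self, max_size: int = 10):
        self.max_size = max_size
        self._data = OrderedDict()

    def get(self, key, default=None):
        if key in self._data:
            self._data.move_to_end(key)  # mark as most recently used
        return self._data.get(key, default)

    def __setitem__(self, key, value):
        self._data[key] = value
        self._data.move_to_end(key)
        while len(self._data) > self.max_size:
            self._data.popitem(last=False)  # evict least recently used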
|
|
|
311 |
|
312 |
def _maybe_set_classification_policy(self):
|
313 |
if os.path.exists(self.path):
|
|
|
320 |
None, # No warning when loading from public hub
|
321 |
)
|
322 |
|
323 |
+
def get_splits(self):
|
324 |
+
if self.splits is not None:
|
325 |
+
return self.splits
|
326 |
+
try:
|
327 |
+
return get_dataset_split_names(
|
328 |
+
path=self.path,
|
329 |
+
config_name=self.name,
|
330 |
+
trust_remote_code=settings.allow_unverified_code,
|
331 |
+
download_config=DownloadConfig(
|
332 |
+
max_retries=settings.loaders_max_retries,
|
333 |
+
extract_on_the_fly=True,
|
334 |
+
),
|
335 |
+
)
|
336 |
+
except:
|
337 |
+
UnitxtWarning(
|
338 |
+
f'LoadHF(path="{self.path}", name="{self.name}") could not retrieve split names without loading the dataset. Consider defining "splits" in the LoadHF definition to improve loading time.'
|
339 |
+
)
|
340 |
+
try:
|
341 |
+
dataset = self.load_dataset(
|
342 |
+
split=None, disable_memory_caching=True, streaming=True
|
343 |
+
)
|
344 |
+
except (
|
345 |
+
NotImplementedError
|
346 |
+
): # streaming is not supported for zipped files so we load without streaming
|
347 |
+
dataset = self.load_dataset(split=None, streaming=False)
|
348 |
+
return list(dataset.keys())
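get_dataset_split_names resolves split names from dataset metadata without materializing the data, which is what makes this fast path worthwhile. A small standalone example; the dataset name is chosen arbitrarily.

# Standalone illustration of the metadata-only split lookup used above.
from datasets import get_dataset_split_names

splits = get_dataset_split_names("squad")  # e.g. ["train", "validation"]
print(splits)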
|
349 |
+
|
350 |
+
def split_generator(self, split: str) -> Generator:
|
351 |
+
if self.get_limit() is not None:
|
352 |
+
self.log_limited_loading()
|
353 |
try:
|
354 |
+
dataset = self.load_dataset(split=split)
|
355 |
except (
|
356 |
NotImplementedError
|
357 |
): # streaming is not supported for zipped files so we load without streaming
|
358 |
+
dataset = self.load_dataset(split=split, streaming=False)
|
359 |
|
360 |
if self.filtering_lambda is not None:
|
361 |
dataset = self.filter_load(dataset)
|
362 |
|
363 |
limit = self.get_limit()
|
364 |
+
if limit is None:
|
365 |
+
yield from dataset
|
366 |
+
else:
|
367 |
+
for i, instance in enumerate(dataset):
|
368 |
+
yield instance
|
369 |
+
if i + 1 >= limit:
|
370 |
+
break
|
|
|
|
|
|
|
|
|
|
|
|
|
371 |
|
372 |
|
373 |
+
class LoadCSV(LazyLoader):
|
374 |
"""Loads data from CSV files.
|
375 |
|
376 |
Supports streaming and can handle large files by loading them in chunks.
|
|
|
415 |
args = {}
|
416 |
if self.file_type == "csv":
|
417 |
args["sep"] = self.sep
|
418 |
+
args["low_memory"] = self.streaming
|
419 |
if self.compression is not None:
|
420 |
args["compression"] = self.compression
|
421 |
if self.lines is not None:
|
|
|
424 |
args["nrows"] = self.get_limit()
|
425 |
return args
|
426 |
|
427 |
+
def get_splits(self) -> List[str]:
|
428 |
+
return list(self.files.keys())
|
429 |
+
|
430 |
+
def split_generator(self, split: str) -> Generator:
|
431 |
+
dataset_id = str(self) + "_" + split
|
432 |
+
dataset = self.__class__._loader_cache.get(dataset_id, None)
|
433 |
+
if dataset is None:
|
434 |
if self.get_limit() is not None:
|
435 |
self.log_limited_loading()
|
436 |
+
for attempt in range(settings.loaders_max_retries):
|
437 |
+
try:
|
438 |
+
reader = self.get_reader()
|
439 |
+
if self.get_limit() is not None:
|
440 |
+
self.log_limited_loading()
|
441 |
+
|
442 |
+
try:
|
443 |
+
dataset = reader(self.files[split], **self.get_args()).to_dict(
|
444 |
+
"records"
|
445 |
+
)
|
446 |
+
except ValueError:
|
447 |
+
import fsspec
|
448 |
+
|
449 |
+
with fsspec.open(self.files[split], mode="rt") as f:
|
450 |
+
dataset = reader(f, **self.get_args()).to_dict("records")
|
451 |
+
except Exception as e:
|
452 |
+
logger.debug(f"Attempt csv load {attempt + 1} failed: {e}")
|
453 |
+
if attempt < settings.loaders_max_retries - 1:
|
454 |
+
time.sleep(2)
|
455 |
+
else:
|
456 |
+
raise e
|
457 |
+
self.__class__._loader_cache.max_size = settings.loader_cache_size
|
458 |
+
self.__class__._loader_cache[dataset_id] = dataset
|
459 |
+
|
460 |
+
for instance in self.__class__._loader_cache[dataset_id]:
|
461 |
+
yield recursive_copy(instance)
|
462 |
|
463 |
|
464 |
+
class LoadFromSklearn(LazyLoader):
|
465 |
"""Loads datasets from the sklearn library.
|
466 |
|
467 |
This loader does not support streaming and is intended for use with sklearn's dataset fetch functions.
|
|
|
497 |
|
498 |
self.downloader = getattr(sklearn_datatasets, f"fetch_{self.dataset_name}")
|
499 |
|
500 |
+
def get_splits(self):
|
501 |
+
return self.splits
|
502 |
+
|
503 |
+
def split_generator(self, split: str) -> Generator:
|
504 |
+
dataset_id = str(self) + "_" + split
|
505 |
+
dataset = self.__class__._loader_cache.get(dataset_id, None)
|
506 |
+
if dataset is None:
|
507 |
+
split_data = self.downloader(subset=split)
|
508 |
+
targets = [split_data["target_names"][t] for t in split_data["target"]]
|
509 |
+
df = pd.DataFrame([split_data["data"], targets]).T
|
510 |
+
df.columns = ["data", "target"]
|
511 |
+
dataset = df.to_dict("records")
|
512 |
+
self.__class__._loader_cache.max_size = settings.loader_cache_size
|
513 |
+
self.__class__._loader_cache[dataset_id] = dataset
|
514 |
+
for instance in self.__class__._loader_cache[dataset_id]:
|
515 |
+
yield recursive_copy(instance)
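The downloader resolved in prepare() is one of sklearn's fetch_* functions; for reference, this is roughly what the raw objects it returns look like, using 20 newsgroups as an example.

# Example of the sklearn fetch_* interface this loader wraps.
from sklearn.datasets import fetch_20newsgroups

bunch = fetch_20newsgroups(subset="train")
print(len(bunch.data))                      # raw documents
print(bunch.target_names[bunch.target[0]])  # label name of the first document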
|
516 |
|
517 |
|
518 |
class MissingKaggleCredentialsError(ValueError):
|
|
|
750 |
return dataset
|
751 |
|
752 |
|
753 |
+
class MultipleSourceLoader(LazyLoader):
|
754 |
"""Allows loading data from multiple sources, potentially mixing different types of loaders.
|
755 |
|
756 |
Args:
|
|
|
774 |
|
775 |
sources: List[Loader]
|
776 |
|
|
|
|
|
777 |
def add_data_classification(self, multi_stream: MultiStream) -> MultiStream:
|
778 |
if self.data_classification_policy is None:
|
779 |
return multi_stream
|
780 |
return super().add_data_classification(multi_stream)
|
781 |
|
782 |
+
def get_splits(self):
|
783 |
+
splits = []
|
784 |
+
for loader in self.sources:
|
785 |
+
splits.extend(loader.get_splits())
|
786 |
+
return list(set(splits))
|
787 |
|
788 |
+
def split_generator(self, split: str) -> Generator[Any, None, None]:
|
789 |
+
yield from FixedFusion(
|
790 |
+
subsets=self.sources,
|
791 |
+
max_instances_per_subset=self.get_limit(),
|
792 |
+
include_splits=[split],
|
793 |
+
)()[split]
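A hedged usage sketch of combining loaders; the file names are placeholders and the exact call pattern is an assumption based on the fields shown in this diff.

# Hypothetical usage: fuse two CSV sources into a single "train" stream.
loader = MultipleSourceLoader(
    sources=[
        LoadCSV(files={"train": "part_a.csv"}),
        LoadCSV(files={"train": "part_b.csv"}),
    ]
)
splits = loader.get_splits()  # union of the sources' splits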
|
794 |
|
795 |
|
796 |
class LoadFromDictionary(Loader):
|
|
|
845 |
return self.data
|
846 |
|
847 |
|
848 |
+
class LoadFromHFSpace(LazyLoader):
|
849 |
+
"""Used to load data from HuggingFace Spaces lazily.
|
|
|
|
|
|
|
850 |
|
851 |
Args:
|
852 |
space_name (str):
|
|
|
867 |
token_env (str, optional):
|
868 |
Key of an env variable which value will be used for
|
869 |
authentication when accessing the HuggingFace Space - if necessary.
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
870 |
"""
|
871 |
|
872 |
space_name: str
|
|
|
891 |
return token
|
892 |
return self.use_token
|
893 |
|
894 |
+
@staticmethod
|
895 |
+
def _is_wildcard(path: str) -> bool:
|
896 |
+
wildcard_characters = ["*", "?", "[", "]"]
|
897 |
+
return any(char in path for char in wildcard_characters)
|
898 |
|
|
|
899 |
|
900 |
+
|
901 |
+
def _get_repo_files(self):
|
902 |
+
if not hasattr(self, "_repo_files") or self._repo_files is None:
|
903 |
+
api = HfApi()
|
904 |
+
self._repo_files = api.list_repo_files(
|
905 |
+
self.space_name, repo_type="space", revision=self.revision
|
|
|
|
|
906 |
)
|
907 |
+
return self._repo_files
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
908 |
|
909 |
+
def _get_sub_files(self, file: str) -> List[str]:
|
910 |
+
if self._is_wildcard(file):
|
911 |
+
return fnmatch.filter(self._get_repo_files(), file)
|
912 |
+
return [file]
|
913 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
914 |
|
915 |
+
def get_splits(self) -> List[str]:
|
916 |
+
if isinstance(self.data_files, Mapping):
|
917 |
+
return list(self.data_files.keys())
|
918 |
+
return ["train"] # Default to 'train' if not specified
|
919 |
|
920 |
+
def split_generator(self, split: str) -> Generator:
|
921 |
+
from huggingface_hub import hf_hub_download
|
922 |
+
from huggingface_hub.utils import EntryNotFoundError, RepositoryNotFoundError
|
|
|
|
|
923 |
|
924 |
+
token = self._get_token()
|
925 |
+
files = self.data_files.get(split, self.data_files) if isinstance(self.data_files, Mapping) else self.data_files
|
926 |
|
927 |
+
if isinstance(files, str):
|
928 |
+
files = [files]
|
929 |
+
limit = self.get_limit()
|
930 |
|
931 |
+
if limit is not None:
|
932 |
+
total = 0
|
933 |
+
self.log_limited_loading()
|
|
|
934 |
|
935 |
+
for file in files:
|
936 |
+
for sub_file in self._get_sub_files(file):
|
937 |
+
try:
|
938 |
+
file_path = hf_hub_download(
|
939 |
+
repo_id=self.space_name,
|
940 |
+
filename=sub_file,
|
941 |
+
repo_type="space",
|
942 |
+
token=token,
|
943 |
+
revision=self.revision,
|
944 |
+
local_dir=self.path,
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
945 |
)
|
946 |
+
except EntryNotFoundError as e:
|
947 |
+
raise ValueError(
|
948 |
+
f"The file '{file}' was not found in the space '{self.space_name}'. "
|
949 |
+
f"Please check if the filename is correct, or if it exists in that "
|
950 |
+
f"Huggingface space."
|
951 |
+
) from e
|
952 |
+
except RepositoryNotFoundError as e:
|
953 |
+
raise ValueError(
|
954 |
+
f"The Huggingface space '{self.space_name}' was not found. "
|
955 |
+
f"Please check if the name is correct and you have access to the space."
|
956 |
+
) from e
|
957 |
|
958 |
+
with open(file_path, encoding="utf-8") as f:
|
959 |
+
for line in f:
|
960 |
+
yield json.loads(line.strip())
|
961 |
+
if limit is not None:
|
962 |
+
total += 1
|
963 |
+
if total >= limit:
|
964 |
+
return
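The per-file download above relies on hf_hub_download; a standalone example using the mt-bench space and file path mentioned earlier in this diff.

# Standalone illustration of downloading one file from a Space repository.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="lmsys/mt-bench",
    filename="data/mt_bench/model_answer/gpt-4.jsonl",
    repo_type="space",
)
print(local_path)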
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
965 |
|
|
|
|
|
|
|
|
|
966 |
|
967 |
|
968 |
class LoadFromAPI(Loader):
|
|
|
991 |
Defaults to "data".
|
992 |
method (str, optional):
|
993 |
The HTTP method to use for API requests. Defaults to "GET".
|
994 |
+
verify_cert (bool):
|
995 |
+
Apply verification of the SSL certificate
|
996 |
+
Defaults as True
|
997 |
"""
|
998 |
|
999 |
urls: Dict[str, str]
|
|
|
1004 |
headers: Optional[Dict[str, Any]] = None
|
1005 |
data_field: str = "data"
|
1006 |
method: str = "GET"
|
1007 |
+
verify_cert: bool = True
|
1008 |
|
1009 |
# class level shared cache:
|
1010 |
_loader_cache = LRUCache(max_size=settings.loader_cache_size)
|
|
|
1036 |
response = requests.get(
|
1037 |
url,
|
1038 |
headers=base_headers,
|
1039 |
+
verify=self.verify_cert,
|
1040 |
)
|
1041 |
elif self.method == "POST":
|
1042 |
response = requests.post(
|
1043 |
url,
|
1044 |
headers=base_headers,
|
1045 |
+
verify=self.verify_cert,
|
1046 |
json={},
|
1047 |
)
|
1048 |
else:
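To illustrate what the new verify_cert flag toggles, a minimal request sketch; the URL is a placeholder.

# verify=False disables SSL certificate verification; verify=True (the default)
# enforces it.
import requests

resp = requests.get("https://example.com/data", verify=True, timeout=30)
resp.raise_for_status()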
|
metric.py
CHANGED
@@ -13,7 +13,6 @@ from .collections import __file__ as _
|
|
13 |
from .collections_operators import __file__ as _
|
14 |
from .dataclass import __file__ as _
|
15 |
from .dataset_utils import __file__ as _
|
16 |
-
from .db_utils import __file__ as _
|
17 |
from .deprecation_utils import __file__ as _
|
18 |
from .dialog_operators import __file__ as _
|
19 |
from .dict_utils import __file__ as _
|
@@ -55,6 +54,7 @@ from .settings_utils import get_constants
|
|
55 |
from .span_lableing_operators import __file__ as _
|
56 |
from .split_utils import __file__ as _
|
57 |
from .splitters import __file__ as _
|
|
|
58 |
from .standard import __file__ as _
|
59 |
from .stream import __file__ as _
|
60 |
from .stream_operators import __file__ as _
|
|
|
13 |
from .collections_operators import __file__ as _
|
14 |
from .dataclass import __file__ as _
|
15 |
from .dataset_utils import __file__ as _
|
|
|
16 |
from .deprecation_utils import __file__ as _
|
17 |
from .dialog_operators import __file__ as _
|
18 |
from .dict_utils import __file__ as _
|
|
|
54 |
from .span_lableing_operators import __file__ as _
|
55 |
from .split_utils import __file__ as _
|
56 |
from .splitters import __file__ as _
|
57 |
+
from .sql_utils import __file__ as _
|
58 |
from .standard import __file__ as _
|
59 |
from .stream import __file__ as _
|
60 |
from .stream_operators import __file__ as _
|
metrics.py
CHANGED
@@ -1,4 +1,3 @@
 1 - FINQA_HASH = "42430b8613082bb4b85d49210284135d"
 2   import ast
 3   import json
 4   import math
@@ -10,14 +9,27 @@ import warnings
 10   from abc import ABC, abstractmethod
 11   from collections import Counter, defaultdict
 12   from dataclasses import field
 13   from functools import lru_cache
 14 - from typing import … [single-line import removed; the full list is lost in this render]
 15
 16   import evaluate
 17   import numpy
 18   import numpy as np
 19   import pandas as pd
 20   import requests
 21   from scipy.stats import bootstrap
 22   from scipy.stats._warnings_errors import DegenerateDataWarning
 23
@@ -29,12 +41,12 @@ from .dataclass import (
|
|
29 |
NonPositionalField,
|
30 |
OptionalField,
|
31 |
)
|
32 |
-
from .db_utils import get_db_connector
|
33 |
from .deprecation_utils import deprecation
|
34 |
-
from .error_utils import Documentation, UnitxtWarning
|
35 |
from .inference import (
|
36 |
HFPipelineBasedInferenceEngine,
|
37 |
InferenceEngine,
|
|
|
38 |
TorchDeviceMixin,
|
39 |
WMLInferenceEngineGeneration,
|
40 |
)
|
@@ -51,15 +63,33 @@ from .operator import (
|
|
51 |
from .operators import ArtifactFetcherMixin, Copy, Set
|
52 |
from .random_utils import get_seed
|
53 |
from .settings_utils import get_settings
|
|
|
54 |
from .stream import MultiStream, Stream
|
55 |
from .type_utils import Type, isoftype, parse_type_string, to_type_string
|
56 |
from .utils import deep_copy, recursive_copy
|
57 |
|
|
|
|
|
58 |
logger = get_logger()
|
59 |
settings = get_settings()
|
60 |
|
61 |
warnings.filterwarnings("ignore", category=DegenerateDataWarning)
|
62 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
63 |
|
64 |
class MetricsList(ListCollection):
|
65 |
def verify(self):
|
@@ -378,8 +408,6 @@ class ConfidenceIntervalMixin(Artifact):
|
|
378 |
return result
|
379 |
|
380 |
|
381 |
-
from typing import Generic, TypeVar
|
382 |
-
|
383 |
IntermediateType = TypeVar("IntermediateType")
|
384 |
PredictionType = TypeVar("PredictionType")
|
385 |
|
@@ -1779,7 +1807,7 @@ class ExactMatchMM(InstanceMetric):
|
|
1779 |
@staticmethod
|
1780 |
@lru_cache(maxsize=10000)
|
1781 |
def exact_match(pred, gt):
|
1782 |
-
"""Brought from MMStar"""
|
1783 |
answer = gt.lower().strip().replace("\n", " ")
|
1784 |
predict = pred.lower().strip().replace("\n", " ")
|
1785 |
try:
|
@@ -1894,12 +1922,11 @@ class RelaxedCorrectness(GlobalMetric):
|
|
1894 |
return_dict["relaxed_human_split"].append(score)
|
1895 |
else:
|
1896 |
return_dict["relaxed_augmented_split"].append(score)
|
1897 |
-
|
1898 |
key: sum(value) / len(value)
|
1899 |
for key, value in return_dict.items()
|
1900 |
if len(value) > 0
|
1901 |
}
|
1902 |
-
return return_dict
|
1903 |
|
1904 |
@staticmethod
|
1905 |
def _to_float(text: str):
|
@@ -2010,15 +2037,12 @@ class WebsrcSquadF1(GlobalMetric):
|
|
2010 |
string = string.lower()
|
2011 |
|
2012 |
# strip leading and trailing whitespaces
|
2013 |
-
|
2014 |
-
|
2015 |
-
return string
|
2016 |
|
2017 |
def _tokenize(text):
|
2018 |
# Regex pattern to match words and isolate punctuation
|
2019 |
pattern = r"\w+|[^\w\s]"
|
2020 |
-
|
2021 |
-
return tokens
|
2022 |
|
2023 |
def _compute_f1(sa, sb):
|
2024 |
sa = _normalize_str(sa)
|
@@ -2036,8 +2060,7 @@ class WebsrcSquadF1(GlobalMetric):
|
|
2036 |
comm = sa.intersection(sb)
|
2037 |
prec = len(comm) / len(sb)
|
2038 |
rec = len(comm) / len(sa)
|
2039 |
-
|
2040 |
-
return f1
|
2041 |
|
2042 |
judge_list = []
|
2043 |
for sample in samples:
|
@@ -2302,9 +2325,7 @@ class HuggingfaceMetric(GlobalMetric):
|
|
2302 |
def prepare(self):
|
2303 |
super().prepare()
|
2304 |
|
2305 |
-
self.metric =
|
2306 |
-
self.hf_metric_name, experiment_id=str(uuid.uuid4())
|
2307 |
-
)
|
2308 |
|
2309 |
def compute(
|
2310 |
self,
|
@@ -2379,9 +2400,7 @@ class HuggingfaceBulkMetric(BulkInstanceMetric):
|
|
2379 |
def prepare(self):
|
2380 |
super().prepare()
|
2381 |
|
2382 |
-
self.metric =
|
2383 |
-
self.hf_metric_name, experiment_id=str(uuid.uuid4())
|
2384 |
-
)
|
2385 |
|
2386 |
def compute(
|
2387 |
self,
|
@@ -2426,9 +2445,7 @@ class HuggingfaceInstanceMetric(InstanceMetric):
|
|
2426 |
def prepare(self):
|
2427 |
super().prepare()
|
2428 |
|
2429 |
-
self.metric =
|
2430 |
-
self.hf_metric_name, experiment_id=str(uuid.uuid4())
|
2431 |
-
)
|
2432 |
|
2433 |
def compute(self, references: List[Any], prediction: Any, task_data: Dict) -> dict:
|
2434 |
# invokes module.compute, which invokes, e.g., meteor's _compute
|
@@ -2463,6 +2480,8 @@ class MeteorFast(ReductionInstanceMetric[str, Dict[str, float]]):
|
|
2463 |
|
2464 |
nltk.download("wordnet", quiet=True)
|
2465 |
nltk.download("omw-1.4", quiet=True)
|
|
|
|
|
2466 |
from nltk import word_tokenize
|
2467 |
from nltk.translate import meteor_score
|
2468 |
|
@@ -2530,7 +2549,7 @@ class F1(GlobalMetric):
|
|
2530 |
def prepare(self):
|
2531 |
super().prepare()
|
2532 |
|
2533 |
-
self._metric =
|
2534 |
|
2535 |
def get_str_id(self, str):
|
2536 |
if str not in self.str_to_id:
|
@@ -2807,8 +2826,8 @@ class F1MultiLabel(GlobalMetric, PackageRequirementsMixin):
|
|
2807 |
def prepare(self):
|
2808 |
super().prepare()
|
2809 |
|
2810 |
-
self._metric =
|
2811 |
-
self.metric, "multilabel"
|
2812 |
)
|
2813 |
|
2814 |
def add_str_to_id(self, str):
|
@@ -3443,13 +3462,16 @@ class BertScore(MapReduceMetric[str, Dict[str, float]], TorchDeviceMixin):
|
|
3443 |
|
3444 |
def prepare(self):
|
3445 |
super().prepare()
|
3446 |
-
|
3447 |
-
|
3448 |
-
self.bertscore = load("bertscore", experiment_id=str(uuid.uuid4()))
|
3449 |
|
3450 |
def map_stream(
|
3451 |
self, evaluation_inputs_stream: Generator[EvaluationInput[str], None, None]
|
3452 |
):
|
|
|
|
|
|
|
|
|
|
|
3453 |
predictions = []
|
3454 |
references = []
|
3455 |
for prediction, reference, _ in evaluation_inputs_stream:
|
@@ -3495,17 +3517,17 @@ class SentenceBert(MapReduceMetric[str, float], TorchDeviceMixin):
|
|
3495 |
|
3496 |
def prepare(self):
|
3497 |
super().prepare()
|
3498 |
-
|
3499 |
-
|
3500 |
-
self.model = SentenceTransformer(self.model_name, device=self.get_device_id())
|
3501 |
|
3502 |
def map_stream(
|
3503 |
self, evaluation_inputs_stream: Generator[EvaluationInput, None, None]
|
3504 |
):
|
3505 |
-
|
3506 |
-
# return [0.5 for _ in evaluation_inputs_stream]
|
3507 |
|
3508 |
-
|
|
|
|
|
|
|
3509 |
|
3510 |
scores = []
|
3511 |
|
@@ -3553,15 +3575,23 @@ class Reward(MapReduceMetric[str, float], TorchDeviceMixin):
|
|
3553 |
|
3554 |
def prepare(self):
|
3555 |
super().prepare()
|
3556 |
-
|
3557 |
-
|
3558 |
-
self.model = pipeline(
|
3559 |
-
"text-classification", model=self.model_name, device=self.get_device()
|
3560 |
-
)
|
3561 |
|
3562 |
def map_stream(
|
3563 |
self, evaluation_inputs_stream: Generator[EvaluationInput[str], None, None]
|
3564 |
):
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
3565 |
inputs = []
|
3566 |
for prediction, references, _ in evaluation_inputs_stream:
|
3567 |
inputs.append({"text": references[0], "text_pair": prediction})
|
@@ -3591,8 +3621,11 @@ class Detector(BulkInstanceMetric):
|
|
3591 |
from transformers import pipeline
|
3592 |
|
3593 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
|
|
3594 |
self.pipe = pipeline(
|
3595 |
-
"text-classification", model=
|
3596 |
)
|
3597 |
|
3598 |
def compute(
|
@@ -3624,10 +3657,14 @@ class RegardMetric(GlobalMetric):
|
|
3624 |
super().prepare()
|
3625 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
3626 |
|
|
|
|
|
|
|
3627 |
self.regard_model = AutoModelForSequenceClassification.from_pretrained(
|
3628 |
-
|
|
|
3629 |
)
|
3630 |
-
self.regard_tokenizer = AutoTokenizer.from_pretrained(
|
3631 |
|
3632 |
def _evaluate(self, predictions, inputs):
|
3633 |
import torch
|
@@ -3810,10 +3847,14 @@ class SafetyMetric(MapReduceMetric[str, Tuple[float, str]], TorchDeviceMixin):
|
|
3810 |
super().prepare()
|
3811 |
from transformers import pipeline
|
3812 |
|
|
|
|
|
|
|
|
|
3813 |
if not settings.mock_inference_mode:
|
3814 |
self.model = pipeline(
|
3815 |
"text-classification",
|
3816 |
-
model=
|
3817 |
device=self.get_device(),
|
3818 |
)
|
3819 |
|
@@ -3997,7 +4038,11 @@ class Perplexity(BulkInstanceMetric):
|
|
3997 |
if self.lm is None:
|
3998 |
from transformers import AutoConfig
|
3999 |
|
4000 |
-
|
|
|
|
|
|
|
|
|
4001 |
self.lm = (
|
4002 |
self.EncoderDecoderLM(
|
4003 |
model_name=self.model_name, single_token_mode=self.single_token_mode
|
@@ -4075,10 +4120,13 @@ class Perplexity(BulkInstanceMetric):
|
|
4075 |
|
4076 |
self.model_name = model_name
|
4077 |
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
|
|
|
|
|
|
4078 |
self.model = (
|
4079 |
-
self.model_class().from_pretrained(
|
4080 |
)
|
4081 |
-
self.tokenizer = AutoTokenizer.from_pretrained(
|
4082 |
if self.tokenizer.pad_token_id is None:
|
4083 |
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
|
4084 |
self.single_token_mode = single_token_mode
|
@@ -4243,7 +4291,7 @@ class FaithfulnessHHEM(BulkInstanceMetric):
|
|
4243 |
batch_size: int = 2
|
4244 |
model_name: str = "vectara/hallucination_evaluation_model"
|
4245 |
prediction_type = str
|
4246 |
-
|
4247 |
max_context_words = 4096
|
4248 |
reduction_map = {"mean": [main_score]}
|
4249 |
|
@@ -4260,9 +4308,11 @@ class FaithfulnessHHEM(BulkInstanceMetric):
|
|
4260 |
else:
|
4261 |
device = "cpu"
|
4262 |
from transformers import AutoModelForSequenceClassification
|
4263 |
-
|
|
|
|
|
4264 |
self.model = AutoModelForSequenceClassification.from_pretrained(
|
4265 |
-
|
4266 |
).to(device)
|
4267 |
|
4268 |
def compute(
|
@@ -4275,7 +4325,8 @@ class FaithfulnessHHEM(BulkInstanceMetric):
|
|
4275 |
|
4276 |
# treat the references as the contexts and the predictions as answers
|
4277 |
# concat references
|
4278 |
-
|
|
|
4279 |
contexts = [" ".join(c.split(" ")[: self.max_context_words]) for c in contexts]
|
4280 |
answers = predictions
|
4281 |
|
@@ -5620,9 +5671,9 @@ class MetricsEnsemble(InstanceMetric, ArtifactFetcherMixin):
|
|
5620 |
|
5621 |
def create_ensemble_scores(self, instance):
|
5622 |
score = self.ensemble(instance)
|
5623 |
-
instance[
|
5624 |
-
|
5625 |
-
|
5626 |
return instance
|
5627 |
|
5628 |
def ensemble(self, instance):
|
@@ -5835,77 +5886,153 @@ class PredictionLength(InstanceMetric):
 5835        return {self.main_score: [len(prediction)], "score_name": self.main_score}
 5836
 5837
 5838 - class … [removed class header truncated in this render]
 5839    """Return metric for different kinds of "risk" from the Granite-3.0 Guardian model."""
 5840
 5841 -   main_score = "granite_guardian"
 5842    reduction_map: Dict[str, List[str]] = None
 5843    prediction_type = float
 5844
 5845 -   model_name: str = "ibm/granite-guardian-3-8b"
 5846 -   hf_model_name: str = "ibm-granite/granite-guardian-3.0-8b"
 5847    safe_token = "No"
 5848    unsafe_token = "Yes"
 5849
 5850 -   inference_engine: … [truncated]
 5851    generation_params: Dict = None
 5852    risk_name: str = None
 5853
 5854 -   _requirements_list: List[str] = [" … [truncated]
 5855
 5856    def prepare(self):
 5857 -       self.reduction_map = {"mean": [self.main_score]}
 5858 -
 5859 -   def compute(self, references: List[Any], prediction: Any, task_data: Dict) -> dict:
 5860        from transformers import AutoTokenizer
 5861 -
 5862        if not hasattr(self, "_tokenizer") or self._tokenizer is None:
 5863            self._tokenizer = AutoTokenizer.from_pretrained(self.hf_model_name)
 5864 -       self.inference_engine = WMLInferenceEngineGeneration(
 5865 -           model_name=self.model_name,
 5866 -       )
 5867 -       self.inference_engine._load_model()
 5868 -       self.model = self.inference_engine._model
 5869 -       self.generation_params = self.inference_engine._set_logprobs_params({})
 5870
 5871 -   [removed lines lost in this render]
 5872        guardian_config = {"risk_name": self.risk_name}
 5873 -   [removed line lost in this render]
 5874        messages,
 5875        guardian_config=guardian_config,
 5876        tokenize=False,
 5877        add_generation_prompt=True,
 5878    )
 5879
 5880-5882 - [removed lines lost in this render]
 5883        )
 5884 -   [removed line lost in this render]
 5885        label, prob_of_risk = self.parse_output(generated_tokens_list)
 5886-5899 - [removed lines lost in this render; the block ended with "]"]
 5900 -       elif self.risk_name == "context_relevance":
 5901 -           messages = [
 5902 -               {"role": "user", "content": task_data["question"]},
 5903 -               {"role": "context", "content": "\n".join(task_data["contexts"])},
 5904 -           ]
 5905 -       else:
 5906 -           raise NotImplementedError()
 5907
 5908 -   [removed line lost in this render]
 5909
 5910    def parse_output(self, generated_tokens_list):
 5911        top_tokens_list = [
@@ -5944,10 +6071,179 @@ class GraniteGuardianWMLMetric(InstanceMetric):
 5944        ).numpy()
 5945
 5946
 5947 - class … [removed class header truncated in this render]
 5948-5950 - [removed lines lost in this render]
 5951
 5952    prediction_type = "Any"  # string representation is compared
 5953    sql_timeout = 100.0
@@ -5955,73 +6251,229 @@ class ExecutionAccuracy(InstanceMetric):
 5955    _requirements_list = ["sqlglot", "func_timeout"]
 5956
 5957    @staticmethod
 5958 -   def … [removed method header truncated in this render]
 5959 -   [removed line lost in this render]
 5960 -       from sqlglot.optimizer import optimize
 5961
 5962-5965 - [removed lines lost in this render; the block ended with ")"]
 5966 -       sql_diff = sum(0 if (e.__class__.__name__ == "Keep") else 1 for e in t_diff)
 5967
 5968-5974 - [removed lines lost in this render]
 5975        try:
 5976-5977 - [removed lines lost in this render]
 5978        except Exception as e:  # Catch specific exceptions if possible
 5979            logger.info(
 5980 -           f" … [truncated]
 5981            )
 5982
 5983        try:
 5984 -   [removed lines lost in this render]
 5985        except Exception as e:
 5986-5987 - [removed lines lost in this render]
 5988 -       ) from e
 5989
 5990-5992 - [removed lines lost in this render]
 5993 -           logger.info(f"Error executing predicted SQL: {e}")
 5994 -           return 0  # if the predicted SQL fails to execute, result is 0
 5995
 5996        if pred_res is None:
 5997-6004 - [removed lines lost in this render]
 6005
 6006 -   [removed line lost in this render]
 6007 -       """Normalizes a tuple by sorting its non-None elements.
 6008
 6009-6015 - [removed docstring lines lost in this render]
 6016
 6017 -       return … [truncated]
 6018-6019 - [removed lines lost in this render]
 6020        )
 6021
 6022    def compute(self, references: List[Any], prediction: str, task_data: Dict) -> dict:
 6023 -       from func_timeout import FunctionTimedOut, func_timeout
 6024 -
 6025        predicted_sql = prediction
 6026        execution_result: float = 0.0
 6027
@@ -6033,18 +6485,115 @@ class ExecutionAccuracy(InstanceMetric):
 6033
 6034        db_connector = get_db_connector(task_data["db"]["db_type"])(task_data["db"])
 6035
 6036-6047 - [removed lines lost in this render]
 6048        result["score"] = result[self.main_score]
 6049        result["score_name"] = self.main_score
 6050        return result
|
|
|
|
1 |
import ast
|
2 |
import json
|
3 |
import math
|
|
|
9 |
from abc import ABC, abstractmethod
|
10 |
from collections import Counter, defaultdict
|
11 |
from dataclasses import field
|
12 |
+
from enum import Enum
|
13 |
from functools import lru_cache
|
14 |
+
from typing import (
|
15 |
+
Any,
|
16 |
+
Dict,
|
17 |
+
Generator,
|
18 |
+
Generic,
|
19 |
+
List,
|
20 |
+
Literal,
|
21 |
+
Optional,
|
22 |
+
Tuple,
|
23 |
+
TypeVar,
|
24 |
+
Union,
|
25 |
+
)
|
26 |
|
27 |
import evaluate
|
28 |
import numpy
|
29 |
import numpy as np
|
30 |
import pandas as pd
|
31 |
import requests
|
32 |
+
from datasets import DownloadConfig
|
33 |
from scipy.stats import bootstrap
|
34 |
from scipy.stats._warnings_errors import DegenerateDataWarning
|
35 |
|
|
|
41 |
NonPositionalField,
|
42 |
OptionalField,
|
43 |
)
|
|
|
44 |
from .deprecation_utils import deprecation
|
45 |
+
from .error_utils import Documentation, UnitxtError, UnitxtWarning
|
46 |
from .inference import (
|
47 |
HFPipelineBasedInferenceEngine,
|
48 |
InferenceEngine,
|
49 |
+
LogProbInferenceEngine,
|
50 |
TorchDeviceMixin,
|
51 |
WMLInferenceEngineGeneration,
|
52 |
)
|
|
|
63 |
from .operators import ArtifactFetcherMixin, Copy, Set
|
64 |
from .random_utils import get_seed
|
65 |
from .settings_utils import get_settings
|
66 |
+
from .sql_utils import get_db_connector
|
67 |
from .stream import MultiStream, Stream
|
68 |
from .type_utils import Type, isoftype, parse_type_string, to_type_string
|
69 |
from .utils import deep_copy, recursive_copy
|
70 |
|
71 |
+
FINQA_HASH = "42430b8613082bb4b85d49210284135d"
|
72 |
+
|
73 |
logger = get_logger()
|
74 |
settings = get_settings()
|
75 |
|
76 |
warnings.filterwarnings("ignore", category=DegenerateDataWarning)
|
77 |
|
78 |
+
def hf_evaluate_load(path: str, *args, **kwargs):
|
79 |
+
if settings.hf_offline_metrics_path is not None:
|
80 |
+
path = os.path.join(settings.hf_offline_metrics_path, path)
|
81 |
+
return evaluate.load(
|
82 |
+
path,
|
83 |
+
*args,
|
84 |
+
**kwargs,
|
85 |
+
experiment_id=str(uuid.uuid4()),
|
86 |
+
download_config=DownloadConfig(
|
87 |
+
max_retries=settings.loaders_max_retries,
|
88 |
+
),
|
89 |
+
verification_mode="no_checks",
|
90 |
+
trust_remote_code=settings.allow_unverified_code,
|
91 |
+
download_mode= "force_redownload" if settings.disable_hf_datasets_cache else "reuse_dataset_if_exists"
|
92 |
+
)
|
93 |
|
94 |
class MetricsList(ListCollection):
|
95 |
def verify(self):
|
|
|
408 |
return result
|
409 |
|
410 |
|
|
|
|
|
411 |
IntermediateType = TypeVar("IntermediateType")
|
412 |
PredictionType = TypeVar("PredictionType")
|
413 |
|
|
|
1807 |
@staticmethod
|
1808 |
@lru_cache(maxsize=10000)
|
1809 |
def exact_match(pred, gt):
|
1810 |
+
"""Brought from MMStar."""
|
1811 |
answer = gt.lower().strip().replace("\n", " ")
|
1812 |
predict = pred.lower().strip().replace("\n", " ")
|
1813 |
try:
|
|
|
1922 |
return_dict["relaxed_human_split"].append(score)
|
1923 |
else:
|
1924 |
return_dict["relaxed_augmented_split"].append(score)
|
1925 |
+
return {
|
1926 |
key: sum(value) / len(value)
|
1927 |
for key, value in return_dict.items()
|
1928 |
if len(value) > 0
|
1929 |
}
|
|
|
1930 |
|
1931 |
@staticmethod
|
1932 |
def _to_float(text: str):
|
|
|
2037 |
string = string.lower()
|
2038 |
|
2039 |
# strip leading and trailing whitespaces
|
2040 |
+
return string.strip()
|
|
|
|
|
2041 |
|
2042 |
def _tokenize(text):
|
2043 |
# Regex pattern to match words and isolate punctuation
|
2044 |
pattern = r"\w+|[^\w\s]"
|
2045 |
+
return re.findall(pattern, text)
|
|
|
2046 |
|
2047 |
def _compute_f1(sa, sb):
|
2048 |
sa = _normalize_str(sa)
|
|
|
2060 |
comm = sa.intersection(sb)
|
2061 |
prec = len(comm) / len(sb)
|
2062 |
rec = len(comm) / len(sa)
|
2063 |
+
return 2 * prec * rec / (prec + rec) if prec + rec > 0 else 0
|
|
|
2064 |
|
2065 |
judge_list = []
|
2066 |
for sample in samples:
|
|
|
2325 |
def prepare(self):
|
2326 |
super().prepare()
|
2327 |
|
2328 |
+
self.metric = hf_evaluate_load(self.hf_metric_name)
|
|
|
|
|
2329 |
|
2330 |
def compute(
|
2331 |
self,
|
|
|
2400 |
def prepare(self):
|
2401 |
super().prepare()
|
2402 |
|
2403 |
+
self.metric = hf_evaluate_load(self.hf_metric_name)
|
|
|
|
|
2404 |
|
2405 |
def compute(
|
2406 |
self,
|
|
|
2445 |
def prepare(self):
|
2446 |
super().prepare()
|
2447 |
|
2448 |
+
self.metric = hf_evaluate_load(self.hf_metric_name)
|
|
|
|
|
2449 |
|
2450 |
def compute(self, references: List[Any], prediction: Any, task_data: Dict) -> dict:
|
2451 |
# invokes module.compute, which invokes, e.g., meteor's _compute
|
|
|
2480 |
|
2481 |
nltk.download("wordnet", quiet=True)
|
2482 |
nltk.download("omw-1.4", quiet=True)
|
2483 |
+
nltk.download("punkt", quiet=True)
|
2484 |
+
nltk.download("punkt_tab", quiet=True)
|
2485 |
from nltk import word_tokenize
|
2486 |
from nltk.translate import meteor_score
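The added punkt downloads make NLTK's word tokenizer available before it is first used; a small sketch of the METEOR computation these imports enable.

# Minimal METEOR computation with pre-tokenized inputs, matching the NLTK
# resources downloaded above.
import nltk
from nltk import word_tokenize
from nltk.translate import meteor_score

nltk.download("wordnet", quiet=True)
nltk.download("punkt", quiet=True)

score = meteor_score.meteor_score(
    [word_tokenize("the cat sat on the mat")],  # tokenized reference(s)
    word_tokenize("a cat sat on a mat"),        # tokenized prediction
)
print(round(score, 3))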
|
2487 |
|
|
|
2549 |
def prepare(self):
|
2550 |
super().prepare()
|
2551 |
|
2552 |
+
self._metric = hf_evaluate_load(self.metric)
|
2553 |
|
2554 |
def get_str_id(self, str):
|
2555 |
if str not in self.str_to_id:
|
|
|
2826 |
def prepare(self):
|
2827 |
super().prepare()
|
2828 |
|
2829 |
+
self._metric = hf_evaluate_load(
|
2830 |
+
self.metric, "multilabel"
|
2831 |
)
|
2832 |
|
2833 |
def add_str_to_id(self, str):
|
|
|
3462 |
|
3463 |
def prepare(self):
|
3464 |
super().prepare()
|
3465 |
+
self.bertscore = None
|
|
|
|
|
3466 |
|
3467 |
def map_stream(
|
3468 |
self, evaluation_inputs_stream: Generator[EvaluationInput[str], None, None]
|
3469 |
):
|
3470 |
+
from evaluate import load
|
3471 |
+
|
3472 |
+
if self.bertscore is None:
|
3473 |
+
self.bertscore = load("bertscore", experiment_id=str(uuid.uuid4()))
|
3474 |
+
|
3475 |
predictions = []
|
3476 |
references = []
|
3477 |
for prediction, reference, _ in evaluation_inputs_stream:
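Deferring load("bertscore", ...) to the first map_stream call avoids paying the model download at prepare time. For reference, the evaluate BERTScore interface looks like this.

# Standalone BERTScore call; returns per-pair precision/recall/f1 lists.
import uuid
from evaluate import load

bertscore = load("bertscore", experiment_id=str(uuid.uuid4()))
results = bertscore.compute(
    predictions=["the weather is nice today"],
    references=["it is a nice day"],
    lang="en",
)
print(results["f1"])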
|
|
|
3517 |
|
3518 |
def prepare(self):
|
3519 |
super().prepare()
|
3520 |
+
self.model = None
|
|
|
|
|
3521 |
|
3522 |
def map_stream(
|
3523 |
self, evaluation_inputs_stream: Generator[EvaluationInput, None, None]
|
3524 |
):
|
3525 |
+
from sentence_transformers import SentenceTransformer, util
|
|
|
3526 |
|
3527 |
+
if self.model is None:
|
3528 |
+
self.model = SentenceTransformer(
|
3529 |
+
self.model_name, device=self.get_device_id()
|
3530 |
+
)
|
3531 |
|
3532 |
scores = []
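Likewise, the SentenceTransformer model is now created lazily on first use. A short sketch of the similarity computation it performs; the model name below is only an example.

# Cosine similarity between a prediction and a reference with
# sentence-transformers.
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer("all-MiniLM-L6-v2")
emb = model.encode(["a cat sits on the mat", "the cat is sitting on the mat"])
print(float(util.cos_sim(emb[0], emb[1])))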
|
3533 |
|
|
|
3575 |
|
3576 |
def prepare(self):
|
3577 |
super().prepare()
|
3578 |
+
self.model = None
|
|
|
|
|
|
|
|
|
3579 |
|
3580 |
def map_stream(
|
3581 |
self, evaluation_inputs_stream: Generator[EvaluationInput[str], None, None]
|
3582 |
):
|
3583 |
+
if settings.mock_inference_mode:
|
3584 |
+
return [0.5 for _ in evaluation_inputs_stream]
|
3585 |
+
|
3586 |
+
from transformers import pipeline
|
3587 |
+
|
3588 |
+
if self.model is None:
|
3589 |
+
self.model = pipeline(
|
3590 |
+
"text-classification",
|
3591 |
+
model=self.model_name,
|
3592 |
+
device=self.get_device(),
|
3593 |
+
)
|
3594 |
+
|
3595 |
inputs = []
|
3596 |
for prediction, references, _ in evaluation_inputs_stream:
|
3597 |
inputs.append({"text": references[0], "text_pair": prediction})
|
|
|
3621 |
from transformers import pipeline
|
3622 |
|
3623 |
device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
3624 |
+
model_path = self.model_name
|
3625 |
+
if settings.hf_offline_models_path is not None:
|
3626 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
3627 |
self.pipe = pipeline(
|
3628 |
+
"text-classification", model=model_path, device=device,
|
3629 |
)
|
3630 |
|
3631 |
def compute(
|
|
|
3657 |
super().prepare()
|
3658 |
from transformers import AutoModelForSequenceClassification, AutoTokenizer
|
3659 |
|
3660 |
+
model_path = self.model_name
|
3661 |
+
if settings.hf_offline_models_path is not None:
|
3662 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
3663 |
self.regard_model = AutoModelForSequenceClassification.from_pretrained(
|
3664 |
+
model_path,
|
3665 |
+
|
3666 |
)
|
3667 |
+
self.regard_tokenizer = AutoTokenizer.from_pretrained(model_path)
|
3668 |
|
3669 |
def _evaluate(self, predictions, inputs):
|
3670 |
import torch
|
|
|
3847 |
super().prepare()
|
3848 |
from transformers import pipeline
|
3849 |
|
3850 |
+
model_path = self.reward_name
|
3851 |
+
if settings.hf_offline_models_path is not None:
|
3852 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
3853 |
+
|
3854 |
if not settings.mock_inference_mode:
|
3855 |
self.model = pipeline(
|
3856 |
"text-classification",
|
3857 |
+
model=model_path,
|
3858 |
device=self.get_device(),
|
3859 |
)
|
3860 |
|
|
|
4038 |
if self.lm is None:
|
4039 |
from transformers import AutoConfig
|
4040 |
|
4041 |
+
model_path = self.model_name
|
4042 |
+
if settings.hf_offline_models_path is not None:
|
4043 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
4044 |
+
|
4045 |
+
config = AutoConfig.from_pretrained(model_path, trust_remote_code=True)
|
4046 |
self.lm = (
|
4047 |
self.EncoderDecoderLM(
|
4048 |
model_name=self.model_name, single_token_mode=self.single_token_mode
|
|
|
4120 |
|
4121 |
self.model_name = model_name
|
4122 |
self.device = "cuda:0" if torch.cuda.is_available() else "cpu"
|
4123 |
+
model_path = self.model_name
|
4124 |
+
if settings.hf_offline_models_path is not None:
|
4125 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
4126 |
self.model = (
|
4127 |
+
self.model_class().from_pretrained(model_path).to(self.device)
|
4128 |
)
|
4129 |
+
self.tokenizer = AutoTokenizer.from_pretrained(model_path)
|
4130 |
if self.tokenizer.pad_token_id is None:
|
4131 |
self.tokenizer.pad_token_id = self.tokenizer.eos_token_id
|
4132 |
self.single_token_mode = single_token_mode
|
|
|
4291 |
batch_size: int = 2
|
4292 |
model_name: str = "vectara/hallucination_evaluation_model"
|
4293 |
prediction_type = str
|
4294 |
+
# single_reference_per_prediction = True
|
4295 |
max_context_words = 4096
|
4296 |
reduction_map = {"mean": [main_score]}
|
4297 |
|
|
|
4308 |
else:
|
4309 |
device = "cpu"
|
4310 |
from transformers import AutoModelForSequenceClassification
|
4311 |
+
model_path = self.model_name
|
4312 |
+
if settings.hf_offline_models_path is not None:
|
4313 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
4314 |
self.model = AutoModelForSequenceClassification.from_pretrained(
|
4315 |
+
model_path, trust_remote_code=True
|
4316 |
).to(device)
|
4317 |
|
4318 |
def compute(
|
|
|
4325 |
|
4326 |
# treat the references as the contexts and the predictions as answers
|
4327 |
# concat references
|
4328 |
+
|
4329 |
+
contexts = ["\n".join([str(r) for r in refs]) for refs in references]
|
4330 |
contexts = [" ".join(c.split(" ")[: self.max_context_words]) for c in contexts]
|
4331 |
answers = predictions
|
4332 |
|
|
|
5671 |
|
5672 |
def create_ensemble_scores(self, instance):
|
5673 |
score = self.ensemble(instance)
|
5674 |
+
instance["prediction"] = (
|
5675 |
+
score # We use here the prediction field to pass the score to the compute method.
|
5676 |
+
)
|
5677 |
return instance
|
5678 |
|
5679 |
def ensemble(self, instance):
|
|
|
5886 |
return {self.main_score: [len(prediction)], "score_name": self.main_score}
|
5887 |
|
5888 |
|
5889 |
+
class RiskType(str, Enum):
|
5890 |
+
"""Risk type for the Granite Guardian models."""
|
5891 |
+
|
5892 |
+
RAG = "rag_risk"
|
5893 |
+
USER_MESSAGE = "user_risk"
|
5894 |
+
ASSISTANT_MESSAGE = "assistant_risk"
|
5895 |
+
AGENTIC = "agentic_risk"
|
5896 |
+
CUSTOM_RISK = "custom_risk"
|
5897 |
+
|
5898 |
+
|
5899 |
+
class GraniteGuardianBase(InstanceMetric):
|
5900 |
"""Return metric for different kinds of "risk" from the Granite-3.0 Guardian model."""
|
5901 |
|
|
|
5902 |
reduction_map: Dict[str, List[str]] = None
|
5903 |
prediction_type = float
|
5904 |
+
main_score = None
|
5905 |
+
reduction_map = {}
|
5906 |
+
wml_model_name: str = "ibm/granite-guardian-3-8b"
|
5907 |
+
hf_model_name: str = "ibm-granite/granite-guardian-3.1-8b"
|
5908 |
+
|
5909 |
+
wml_params = {
|
5910 |
+
"decoding_method": "greedy",
|
5911 |
+
"max_new_tokens": 20,
|
5912 |
+
"temperature": 0,
|
5913 |
+
"return_options": {
|
5914 |
+
"top_n_tokens": 5,
|
5915 |
+
"input_text": True,
|
5916 |
+
"input_tokens": False,
|
5917 |
+
},
|
5918 |
+
}
|
5919 |
|
|
|
|
|
5920 |
safe_token = "No"
|
5921 |
unsafe_token = "Yes"
|
5922 |
|
5923 |
+
inference_engine: LogProbInferenceEngine = None
|
5924 |
generation_params: Dict = None
|
5925 |
risk_name: str = None
|
5926 |
+
risk_type: RiskType = None
|
5927 |
+
risk_definition: Optional[str] = None
|
5928 |
+
|
5929 |
+
user_message_field: str = "user"
|
5930 |
+
assistant_message_field: str = "assistant"
|
5931 |
+
context_field: str = "context"
|
5932 |
+
tools_field: str = "tools"
|
5933 |
+
|
5934 |
+
available_risks: Dict[RiskType, List[str]] = {
|
5935 |
+
RiskType.USER_MESSAGE: [
|
5936 |
+
"harm",
|
5937 |
+
"social_bias",
|
5938 |
+
"jailbreak",
|
5939 |
+
"violence",
|
5940 |
+
"profanity",
|
5941 |
+
"unethical_behavior",
|
5942 |
+
],
|
5943 |
+
RiskType.ASSISTANT_MESSAGE: [
|
5944 |
+
"harm",
|
5945 |
+
"social_bias",
|
5946 |
+
"violence",
|
5947 |
+
"profanity",
|
5948 |
+
"unethical_behavior",
|
5949 |
+
],
|
5950 |
+
RiskType.RAG: ["context_relevance", "groundedness", "answer_relevance"],
|
5951 |
+
RiskType.AGENTIC: ["function_call"],
|
5952 |
+
}
|
5953 |
|
5954 |
+
_requirements_list: List[str] = ["torch", "transformers"]
|
5955 |
|
5956 |
def prepare(self):
|
|
|
|
|
|
|
5957 |
from transformers import AutoTokenizer
|
5958 |
+
if not isinstance(self.risk_type, RiskType):
|
5959 |
+
self.risk_type = RiskType[self.risk_type]
|
5960 |
if not hasattr(self, "_tokenizer") or self._tokenizer is None:
|
5961 |
+
model_path = self.hf_model_name
|
5962 |
+
if settings.hf_offline_models_path is not None:
|
5963 |
+
model_path = os.path.join(settings.hf_offline_models_path, model_path)
|
5964 |
self._tokenizer = AutoTokenizer.from_pretrained(self.hf_model_name)
|
|
|
|
|
|
|
|
|
|
|
|
|
5965 |
|
5966 |
+
def verify(self):
|
5967 |
+
super().verify()
|
5968 |
+
assert (
|
5969 |
+
self.risk_type == RiskType.CUSTOM_RISK
|
5970 |
+
or self.risk_name in self.available_risks[self.risk_type]
|
5971 |
+
), UnitxtError(
|
5972 |
+
f"The risk '{self.risk_name}' is not a valid '{' '.join([word[0].upper() + word[1:] for word in self.risk_type.split('_')])}'"
|
5973 |
+
)
|
5974 |
+
|
5975 |
+
@abstractmethod
|
5976 |
+
def verify_granite_guardian_config(self, task_data):
|
5977 |
+
pass
|
5978 |
+
|
5979 |
+
@abstractmethod
|
5980 |
+
def process_input_fields(self, task_data):
|
5981 |
+
pass
|
5982 |
+
|
5983 |
+
@classmethod
|
5984 |
+
def get_available_risk_names(cls):
|
5985 |
+
return cls.available_risks[cls.risk_type]
|
5986 |
+
|
5987 |
+
def set_main_score(self):
|
5988 |
+
self.main_score = self.risk_name
|
5989 |
+
self.reduction_map = {"mean": [self.main_score]}
|
5990 |
+
|
5991 |
+
def get_prompt(self, messages):
|
5992 |
guardian_config = {"risk_name": self.risk_name}
|
5993 |
+
if self.risk_type == RiskType.CUSTOM_RISK:
|
5994 |
+
guardian_config["risk_definition"] = self.risk_definition
|
5995 |
+
|
5996 |
+
return self._tokenizer.apply_chat_template(
|
5997 |
messages,
|
5998 |
guardian_config=guardian_config,
|
5999 |
tokenize=False,
|
6000 |
add_generation_prompt=True,
|
6001 |
)
|
6002 |
|
6003 |
+
def compute(self, references: List[Any], prediction: Any, task_data: Dict) -> dict:
|
6004 |
+
self.verify_granite_guardian_config(task_data)
|
6005 |
+
self.set_main_score()
|
6006 |
+
|
6007 |
+
if self.inference_engine is None:
|
6008 |
+
self.inference_engine = WMLInferenceEngineGeneration(
|
6009 |
+
model_name=self.wml_model_name,
|
6010 |
+
**self.wml_params,
|
6011 |
+
)
|
6012 |
+
logger.debug(
|
6013 |
+
f'Risk type is "{self.risk_type}" and risk name is "{self.risk_name}"'
|
6014 |
)
|
6015 |
+
messages = self.process_input_fields(task_data)
|
6016 |
+
prompt = self.get_prompt(messages)
|
6017 |
+
result = self.inference_engine.infer_log_probs([{"source": prompt}])
|
6018 |
+
generated_tokens_list = result[0]
|
6019 |
label, prob_of_risk = self.parse_output(generated_tokens_list)
|
6020 |
+
confidence_score = (
|
6021 |
+
(prob_of_risk if prob_of_risk > 0.5 else 1 - prob_of_risk)
|
6022 |
+
if label is not None
|
6023 |
+
else np.nan
|
6024 |
+
)
|
6025 |
+
result = {
|
6026 |
+
self.main_score: prob_of_risk,
|
6027 |
+
f"{self.main_score}_prob_of_risk": prob_of_risk,
|
6028 |
+
f"{self.main_score}_certainty": confidence_score,
|
6029 |
+
f"{self.main_score}_label": label,
|
6030 |
+
}
|
6031 |
+
logger.debug(f"Results are ready:\n{result}")
|
6032 |
+
return result
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
6033 |
|
6034 |
+
def create_message(self, role: str, content: str) -> List[Dict[str, str]]:
|
6035 |
+
return [{"role": role, "content": content}]
|
6036 |
|
6037 |
def parse_output(self, generated_tokens_list):
|
6038 |
top_tokens_list = [
|
|
|
6071 |
).numpy()
|
6072 |
|
6073 |
|
6074 |
+
class GraniteGuardianUserRisk(GraniteGuardianBase):
|
6075 |
+
risk_type = RiskType.USER_MESSAGE
|
6076 |
+
|
6077 |
+
def verify_granite_guardian_config(self, task_data):
|
6078 |
+
# User message risks only require the user message field and are the same as the assistant message risks, except for jailbreak
|
6079 |
+
assert self.user_message_field in task_data, UnitxtError(
|
6080 |
+
f'Task data must contain "{self.user_message_field}" field'
|
6081 |
+
)
|
6082 |
+
|
6083 |
+
def process_input_fields(self, task_data):
|
6084 |
+
messages = []
|
6085 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6086 |
+
return messages
|
6087 |
+
|
6088 |
+
|
6089 |
+
class GraniteGuardianAssistantRisk(GraniteGuardianBase):
|
6090 |
+
risk_type = RiskType.ASSISTANT_MESSAGE
|
6091 |
+
|
6092 |
+
def verify_granite_guardian_config(self, task_data):
|
6093 |
+
assert (
|
6094 |
+
self.assistant_message_field in task_data
|
6095 |
+
and self.user_message_field in task_data
|
6096 |
+
), UnitxtError(
|
6097 |
+
f'Task data must contain "{self.assistant_message_field}" and "{self.user_message_field}" fields'
|
6098 |
+
)
|
6099 |
+
|
6100 |
+
def process_input_fields(self, task_data):
|
6101 |
+
messages = []
|
6102 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6103 |
+
messages += self.create_message(
|
6104 |
+
"assistant", task_data[self.assistant_message_field]
|
6105 |
+
)
|
6106 |
+
return messages
|
6107 |
+
|
6108 |
+
|
6109 |
+
class GraniteGuardianRagRisk(GraniteGuardianBase):
|
6110 |
+
risk_type = RiskType.RAG
|
6111 |
+
|
6112 |
+
def verify_granite_guardian_config(self, task_data):
|
6113 |
+
if self.risk_name == "context_relevance":
|
6114 |
+
assert (
|
6115 |
+
self.context_field in task_data and self.user_message_field in task_data
|
6116 |
+
), UnitxtError(
|
6117 |
+
f'Task data must contain "{self.context_field}" and "{self.user_message_field}" fields'
|
6118 |
+
)
|
6119 |
+
elif self.risk_name == "groundedness":
|
6120 |
+
assert (
|
6121 |
+
self.context_field in task_data
|
6122 |
+
and self.assistant_message_field in task_data
|
6123 |
+
), UnitxtError(
|
6124 |
+
f'Task data must contain "{self.context_field}" and "{self.assistant_message_field}" fields'
|
6125 |
+
)
|
6126 |
+
elif self.risk_name == "answer_relevance":
|
6127 |
+
assert (
|
6128 |
+
self.user_message_field in task_data
|
6129 |
+
and self.assistant_message_field in task_data
|
6130 |
+
), UnitxtError(
|
6131 |
+
f'Task data must contain "{self.user_message_field}" and "{self.assistant_message_field}" fields'
|
6132 |
+
)
|
6133 |
+
|
6134 |
+
def process_input_fields(self, task_data):
|
6135 |
+
messages = []
|
6136 |
+
if self.risk_name == "context_relevance":
|
6137 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6138 |
+
messages += self.create_message("context", task_data[self.context_field])
|
6139 |
+
elif self.risk_name == "groundedness":
|
6140 |
+
messages += self.create_message("context", task_data[self.context_field])
|
6141 |
+
messages += self.create_message(
|
6142 |
+
"assistant", task_data[self.assistant_message_field]
|
6143 |
+
)
|
6144 |
+
elif self.risk_name == "answer_relevance":
|
6145 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6146 |
+
messages += self.create_message(
|
6147 |
+
"assistant", task_data[self.assistant_message_field]
|
6148 |
+
)
|
6149 |
+
return messages
|
6150 |
+
|
6151 |
+
|
6152 |
+
class GraniteGuardianAgenticRisk(GraniteGuardianBase):
|
6153 |
+
risk_type = RiskType.AGENTIC
|
6154 |
+
|
6155 |
+
def verify_granite_guardian_config(self, task_data):
|
6156 |
+
assert (
|
6157 |
+
self.tools_field in task_data
|
6158 |
+
and self.user_message_field in task_data
|
6159 |
+
and self.assistant_message_field in task_data
|
6160 |
+
), UnitxtError(
|
6161 |
+
f'Task data must contain "{self.tools_field}", "{self.assistant_message_field}" and "{self.user_message_field}" fields'
|
6162 |
+
)
|
6163 |
+
|
6164 |
+
def process_input_fields(self, task_data):
|
6165 |
+
messages = []
|
6166 |
+
messages += self.create_message(
|
6167 |
+
"tools", json.loads(task_data[self.tools_field])
|
6168 |
+
)
|
6169 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6170 |
+
messages += self.create_message(
|
6171 |
+
"assistant", task_data[self.assistant_message_field]
|
6172 |
+
)
|
6173 |
+
return messages
|
6174 |
+
|
6175 |
+
|
6176 |
+
class GraniteGuardianCustomRisk(GraniteGuardianBase):
|
6177 |
+
risk_type = RiskType.CUSTOM_RISK
|
6178 |
+
|
6179 |
+
def verify(self):
|
6180 |
+
super().verify()
|
6181 |
+
assert self.risk_type is not None, UnitxtError(
|
6182 |
+
"In a custom risk, risk_type must be defined"
|
6183 |
+
)
|
6184 |
+
|
6185 |
+
def verify_granite_guardian_config(self, task_data):
|
6186 |
+
# even though this is a custom risk, we will limit the
|
6187 |
+
# message roles to be a subset of the roles Granite Guardian
|
6188 |
+
# was trained with: user, assistant, context & tools.
|
6189 |
+
# we just check whether at least one of them is provided
|
6190 |
+
assert (
|
6191 |
+
self.tools_field in task_data
|
6192 |
+
or self.user_message_field in task_data
|
6193 |
+
or self.assistant_message_field in task_data
|
6194 |
+
or self.context_field in task_data
|
6195 |
+
), UnitxtError(
|
6196 |
+
f'Task data must contain at least one of "{self.tools_field}", "{self.assistant_message_field}", "{self.user_message_field}" or "{self.context_field}" fields'
|
6197 |
+
)
|
6198 |
+
|
6199 |
+
def process_input_fields(self, task_data):
|
6200 |
+
messages = []
|
6201 |
+
if self.context_field in task_data:
|
6202 |
+
messages += self.create_message("context", task_data[self.context_field])
|
6203 |
+
if self.tools_field in task_data:
|
6204 |
+
messages += self.create_message(
|
6205 |
+
"tools", json.loads(task_data[self.tools_field])
|
6206 |
+
)
|
6207 |
+
if self.user_message_field in task_data:
|
6208 |
+
messages += self.create_message("user", task_data[self.user_message_field])
|
6209 |
+
if self.assistant_message_field in task_data:
|
6210 |
+
messages += self.create_message(
|
6211 |
+
"assistant", task_data[self.assistant_message_field]
|
6212 |
+
)
|
6213 |
+
return messages
|
6214 |
+
|
6215 |
+
|
6216 |
+
RISK_TYPE_TO_CLASS: Dict[RiskType, GraniteGuardianBase] = {
|
6217 |
+
RiskType.USER_MESSAGE: GraniteGuardianUserRisk,
|
6218 |
+
RiskType.ASSISTANT_MESSAGE: GraniteGuardianAssistantRisk,
|
6219 |
+
RiskType.RAG: GraniteGuardianRagRisk,
|
6220 |
+
RiskType.AGENTIC: GraniteGuardianAgenticRisk,
|
6221 |
+
RiskType.CUSTOM_RISK: GraniteGuardianCustomRisk,
|
6222 |
+
}
|
6223 |
+
|
6224 |
+
|
6225 |
+
class SQLExecutionAccuracy(InstanceMetric):
|
6226 |
+
reduction_map = {
|
6227 |
+
"mean": [
|
6228 |
+
"execution_accuracy",
|
6229 |
+
"non_empty_execution_accuracy",
|
6230 |
+
"subset_non_empty_execution_result",
|
6231 |
+
"non_empty_gold_df",
|
6232 |
+
"gold_sql_runtime",
|
6233 |
+
"predicted_sql_runtime",
|
6234 |
+
"pred_to_gold_runtime_ratio",
|
6235 |
+
"gold_error",
|
6236 |
+
"predicted_error",
|
6237 |
+
]
|
6238 |
+
}
|
6239 |
+
main_score = "non_empty_execution_accuracy"
|
6240 |
+
ci_scores = [
|
6241 |
+
"execution_accuracy",
|
6242 |
+
"non_empty_execution_accuracy",
|
6243 |
+
"subset_non_empty_execution_result",
|
6244 |
+
"gold_sql_runtime",
|
6245 |
+
"predicted_sql_runtime",
|
6246 |
+
]
|
6247 |
|
6248 |
prediction_type = "Any" # string representation is compared
|
6249 |
sql_timeout = 100.0
|
|
|
6251 |
_requirements_list = ["sqlglot", "func_timeout"]
|
6252 |
|
6253 |
@staticmethod
|
6254 |
+
def compare_dfs_ignore_colnames(df1, df2):
|
6255 |
+
"""Compares two DataFrames based on row content, ignoring column names.
|
|
|
6256 |
|
6257 |
+
Args:
|
6258 |
+
df1 (pd.DataFrame): Pandas DataFrame 1 to compare.
|
6259 |
+
df2 (pd.DataFrame): Pandas DataFrame 2 to compare.
|
|
|
|
|
6260 |
|
6261 |
+
Returns:
|
6262 |
+
True if the DataFrames have the same content (ignoring column names),
|
6263 |
+
False otherwise.
|
6264 |
+
"""
|
6265 |
+
df1.fillna(0, inplace=True)
|
6266 |
+
df2.fillna(0, inplace=True)
|
6267 |
|
6268 |
+
if df1.shape != df2.shape:
|
6269 |
+
return False
|
6270 |
+
|
6271 |
+
# run over all columns of df1,
|
6272 |
+
# and see if there is a column in df2 that matches it,
|
6273 |
+
# if not, return False; if all the columns matched, return True
|
6274 |
+
for df1_col in df1.columns:
|
6275 |
+
col_matched = False
|
6276 |
+
for df2_col in df2.columns:
|
6277 |
+
if all(df1[df1_col].values == df2[df2_col].values):
|
6278 |
+
col_matched = True
|
6279 |
+
if not col_matched:
|
6280 |
+
return False
|
6281 |
+
|
6282 |
+
return True
|
6283 |
+
|
6284 |
+
@staticmethod
|
6285 |
+
def is_subset_ignore_colnames(df1, df2):
|
6286 |
+
"""Checks if df1 is a subset of df2 based on row content, ignoring column names.
|
6287 |
+
|
6288 |
+
Args:
|
6289 |
+
df1: Pandas DataFrame 1 to compare.
|
6290 |
+
df2: Pandas DataFrame 2 to compare.
|
6291 |
+
|
6292 |
+
Returns:
|
6293 |
+
True if df1 is a subset of df2 based on column values,
|
6294 |
+
False otherwise.
|
6295 |
+
"""
|
6296 |
+
if df1.empty or df2.empty or df1.shape[1] > df2.shape[1]:
|
6297 |
+
return False
|
6298 |
+
|
6299 |
+
def make_hashable(value):
|
6300 |
+
if isinstance(value, dict):
|
6301 |
+
return json.dumps(value, sort_keys=True)
|
6302 |
+
if isinstance(value, list):
|
6303 |
+
return tuple(value)
|
6304 |
+
return value
|
6305 |
+
|
6306 |
+
df1_cols = [
|
6307 |
+
tuple(make_hashable(value) for value in df1.iloc[:, i])
|
6308 |
+
for i in range(df1.shape[1])
|
6309 |
+
]
|
6310 |
+
df2_cols = [
|
6311 |
+
tuple(make_hashable(value) for value in df2.iloc[:, j])
|
6312 |
+
for j in range(df2.shape[1])
|
6313 |
+
]
|
6314 |
+
df2_cols_count = Counter(df2_cols)
|
6315 |
+
for col in df1_cols:
|
6316 |
+
if df2_cols_count[col] > 0:
|
6317 |
+
df2_cols_count[col] -= 1
|
6318 |
+
else:
|
6319 |
+
return False
|
6320 |
+
|
6321 |
+
return True
|
6322 |
+
|
6323 |
+
def get_sql_execution_results(
|
6324 |
+
self, predicted_sql: str, gold_sql: str, connector
|
6325 |
+
) -> (int, int, int, int, int, int, int, int, int, str, str, str):
|
6326 |
+
"""Runs SQL queries using the provided connector and gets scores and results.
|
6327 |
+
|
6328 |
+
Args:
|
6329 |
+
predicted_sql (str): predicted SQL query
|
6330 |
+
gold_sql (str): gold reference SQL query
|
6331 |
+
connector: database connector
|
6332 |
|
6333 |
+
Returns:
|
6334 |
+
a 12-tuple of
|
6335 |
+
1. execution_result: if df responses match
|
6336 |
+
2. non_empty_execution_result: if dfs are non-empty and match
|
6337 |
+
3. subset_non_empty_execution_result: if non-empty dfs and gt df subset of predicted df
|
6338 |
+
4. non_empty_gold_df: if gt df is non-empty
|
6339 |
+
5. gold_sql_runtime: ground truth query runtime
|
6340 |
+
6. predicted_sql_runtime: predicted query runtime
|
6341 |
+
7. pred_to_gold_runtime_ratio: ratio of predicted query runtime to gt query runtime
|
6342 |
+
8. gold_error: if gt has an error
|
6343 |
+
9. predicted_error: if predicted query has an error
|
6344 |
+
10. ground truth dataframe
|
6345 |
+
11. predicted query's dataframe
|
6346 |
+
12. error message (if any)
|
6347 |
+
"""
|
6348 |
+
import time
|
6349 |
+
|
6350 |
+
from func_timeout import func_timeout
|
6351 |
+
|
6352 |
+
from .sql_utils import sqlglot_optimized_equivalence
|
6353 |
+
|
6354 |
+
gold_res = None
|
6355 |
+
gold_error = ""
|
6356 |
+
gold_sql_runtime = 0
|
6357 |
try:
|
6358 |
+
start_time = time.perf_counter()
|
6359 |
+
gold_res, gold_error = func_timeout(
|
6360 |
+
self.sql_timeout,
|
6361 |
+
connector.execute_query,
|
6362 |
+
args=(gold_sql,),
|
6363 |
+
)
|
6364 |
+
end_time = time.perf_counter()
|
6365 |
+
gold_sql_runtime = end_time - start_time
|
6366 |
+
except Exception as e:
|
6367 |
+
gold_error = f"Error executing gold SQL: {e}"
|
6368 |
+
if gold_error is not None:
|
6369 |
+
return (
|
6370 |
+
0,
|
6371 |
+
0,
|
6372 |
+
0,
|
6373 |
+
0,
|
6374 |
+
gold_sql_runtime,
|
6375 |
+
0,
|
6376 |
+
0,
|
6377 |
+
0,
|
6378 |
+
0,
|
6379 |
+
"",
|
6380 |
+
"",
|
6381 |
+
gold_error,
|
6382 |
+
)
|
6383 |
+
|
6384 |
+
gold_df = pd.DataFrame(gold_res)
|
6385 |
+
non_empty_gold_df = 0 if gold_df.empty else 1
|
6386 |
+
|
6387 |
+
no_execution_match_result = (
|
6388 |
+
1,
|
6389 |
+
non_empty_gold_df,
|
6390 |
+
non_empty_gold_df,
|
6391 |
+
non_empty_gold_df,
|
6392 |
+
gold_sql_runtime,
|
6393 |
+
0,
|
6394 |
+
0,
|
6395 |
+
1,
|
6396 |
+
0,
|
6397 |
+
gold_df.to_json(),
|
6398 |
+
gold_df.to_json(),
|
6399 |
+
"",
|
6400 |
+
)
|
6401 |
+
if predicted_sql.lower().strip() == gold_sql.lower().strip():
|
6402 |
+
return no_execution_match_result
|
6403 |
+
try:
|
6404 |
+
if sqlglot_optimized_equivalence(gold_sql, predicted_sql):
|
6405 |
+
return no_execution_match_result
|
6406 |
except Exception as e: # Catch specific exceptions if possible
|
6407 |
logger.info(
|
6408 |
+
f"Couldn't test equivalent_sqls: {e}. Treating as non-equivalent and going to test with the db."
|
6409 |
)
|
6410 |
|
6411 |
+
pred_res = None
|
6412 |
+
pred_error = ""
|
6413 |
+
pred_sql_runtime = 0
|
6414 |
try:
|
6415 |
+
start_time = time.perf_counter()
|
6416 |
+
pred_res, pred_error = func_timeout(
|
6417 |
+
self.sql_timeout,
|
6418 |
+
connector.execute_query,
|
6419 |
+
args=(predicted_sql,),
|
6420 |
+
)
|
6421 |
+
end_time = time.perf_counter()
|
6422 |
+
pred_sql_runtime = end_time - start_time
|
6423 |
except Exception as e:
|
6424 |
+
pred_error = f"Error executing predicted SQL: {e}"
|
6425 |
+
logger.info(pred_error)
|
|
|
6426 |
|
6427 |
+
pred_to_gold_runtime_ratio = (
|
6428 |
+
float(pred_sql_runtime) / gold_sql_runtime if gold_sql_runtime > 0 else 0
|
6429 |
+
)
|
|
|
|
|
6430 |
|
6431 |
if pred_res is None:
|
6432 |
+
return (
|
6433 |
+
0,
|
6434 |
+
0,
|
6435 |
+
0,
|
6436 |
+
0,
|
6437 |
+
gold_sql_runtime,
|
6438 |
+
pred_sql_runtime,
|
6439 |
+
pred_to_gold_runtime_ratio,
|
6440 |
+
0,
|
6441 |
+
1,
|
6442 |
+
"",
|
6443 |
+
"",
|
6444 |
+
pred_error,
|
6445 |
+
)
|
6446 |
|
6447 |
+
predicted_df = pd.DataFrame(pred_res)
|
|
|
6448 |
|
6449 |
+
execution_result = (
|
6450 |
+
1 if self.compare_dfs_ignore_colnames(predicted_df, gold_df) else 0
|
6451 |
+
)
|
6452 |
|
6453 |
+
subset_non_empty_execution_result = 0
|
6454 |
+
non_empty_execution_result = 0
|
6455 |
+
if non_empty_gold_df:
|
6456 |
+
if execution_result == 1:
|
6457 |
+
non_empty_execution_result = 1
|
6458 |
+
if self.is_subset_ignore_colnames(gold_df, predicted_df):
|
6459 |
+
subset_non_empty_execution_result = 1
|
6460 |
|
6461 |
+
return (
|
6462 |
+
execution_result,
|
6463 |
+
non_empty_execution_result,
|
6464 |
+
subset_non_empty_execution_result,
|
6465 |
+
non_empty_gold_df,
|
6466 |
+
gold_sql_runtime,
|
6467 |
+
pred_sql_runtime,
|
6468 |
+
pred_to_gold_runtime_ratio,
|
6469 |
+
0,
|
6470 |
+
0,
|
6471 |
+
gold_df.to_json(),
|
6472 |
+
predicted_df.to_json(),
|
6473 |
+
pred_error,
|
6474 |
)
|
6475 |
|
6476 |
def compute(self, references: List[Any], prediction: str, task_data: Dict) -> dict:
|
|
|
|
|
6477 |
predicted_sql = prediction
|
6478 |
execution_result: float = 0.0
|
6479 |
|
|
|
6485 |
|
6486 |
db_connector = get_db_connector(task_data["db"]["db_type"])(task_data["db"])
|
6487 |
|
6488 |
+
logger.debug(
|
6489 |
+
f"Starting to get SQL execution results over DB: {task_data['db']}"
|
6490 |
+
)
|
6491 |
+
(
|
6492 |
+
execution_result,
|
6493 |
+
non_empty_execution_result,
|
6494 |
+
subset_non_empty_execution_result,
|
6495 |
+
non_empty_gold_df,
|
6496 |
+
gold_sql_runtime,
|
6497 |
+
predicted_sql_runtime,
|
6498 |
+
pred_to_gold_runtime_ratio,
|
6499 |
+
gold_error,
|
6500 |
+
predicted_error,
|
6501 |
+
gold_df_json,
|
6502 |
+
predicted_df_json,
|
6503 |
+
error_message,
|
6504 |
+
) = self.get_sql_execution_results(
|
6505 |
+
predicted_sql, references[0], db_connector
|
6506 |
+
)
|
6507 |
+
|
6508 |
+
result = {
|
6509 |
+
"execution_accuracy": float(execution_result),
|
6510 |
+
"non_empty_execution_accuracy": float(non_empty_execution_result),
|
6511 |
+
"subset_non_empty_execution_result": float(
|
6512 |
+
subset_non_empty_execution_result
|
6513 |
+
),
|
6514 |
+
"non_empty_gold_df": float(non_empty_gold_df),
|
6515 |
+
"gold_sql_runtime": float(gold_sql_runtime),
|
6516 |
+
"predicted_sql_runtime": float(predicted_sql_runtime),
|
6517 |
+
"pred_to_gold_runtime_ratio": float(pred_to_gold_runtime_ratio),
|
6518 |
+
"gold_error": float(gold_error),
|
6519 |
+
"predicted_error": float(predicted_error),
|
6520 |
+
"error_message": str(error_message),
|
6521 |
+
"gold_df_json": str(gold_df_json),
|
6522 |
+
"predicted_df_json": str(predicted_df_json),
|
6523 |
+
}
|
6524 |
+
result["score"] = result[self.main_score]
|
6525 |
+
result["score_name"] = self.main_score
|
6526 |
+
logger.debug(f"SQL Execution Accuracy Result: {result}")
|
6527 |
+
return result
|
6528 |
+
|
6529 |
+
|
6530 |
+
class SQLNonExecutionAccuracy(InstanceMetric):
|
6531 |
+
reduction_map = {
|
6532 |
+
"mean": [
|
6533 |
+
"sqlglot_validity",
|
6534 |
+
"sqlparse_validity",
|
6535 |
+
"sqlglot_equivalence",
|
6536 |
+
"sqlglot_optimized_equivalence",
|
6537 |
+
"sqlparse_equivalence",
|
6538 |
+
"sql_exact_match",
|
6539 |
+
]
|
6540 |
+
}
|
6541 |
+
main_score = "sqlglot_equivalence"
|
6542 |
+
ci_scores = [
|
6543 |
+
"sqlglot_validity",
|
6544 |
+
"sqlparse_validity",
|
6545 |
+
"sqlglot_equivalence",
|
6546 |
+
"sqlglot_optimized_equivalence",
|
6547 |
+
"sqlparse_equivalence",
|
6548 |
+
"sql_exact_match",
|
6549 |
+
]
|
6550 |
+
|
6551 |
+
prediction_type = "Any" # string representation is compared
|
6552 |
+
|
6553 |
+
_requirements_list = ["sqlglot", "sqlparse"]
|
6554 |
+
|
6555 |
+
def compute(self, references: List[Any], prediction: str, task_data: Dict) -> dict:
|
6556 |
+
from .sql_utils import (
|
6557 |
+
is_sqlglot_parsable,
|
6558 |
+
is_sqlparse_parsable,
|
6559 |
+
sql_exact_match,
|
6560 |
+
sqlglot_optimized_equivalence,
|
6561 |
+
sqlglot_parsed_queries_equivalent,
|
6562 |
+
sqlparse_queries_equivalent,
|
6563 |
+
)
|
6564 |
+
|
6565 |
+
predicted_sql = prediction
|
6566 |
+
gold_sql = references[0]
|
6567 |
+
|
6568 |
+
if predicted_sql and predicted_sql.strip() != "":
|
6569 |
+
if not predicted_sql.startswith("SELECT") and "SELECT" in predicted_sql:
|
6570 |
+
predicted_sql = predicted_sql[predicted_sql.find("SELECT") :]
|
6571 |
+
if ";" in predicted_sql:
|
6572 |
+
predicted_sql = predicted_sql[: predicted_sql.find(";") + 1]
|
6573 |
+
|
6574 |
+
is_sqlglot_parsable = is_sqlglot_parsable(predicted_sql)
|
6575 |
+
is_sqlparse_parsable = is_sqlparse_parsable(predicted_sql)
|
6576 |
+
result = {
|
6577 |
+
"sqlglot_validity": float(is_sqlglot_parsable),
|
6578 |
+
"sqlparse_validity": float(is_sqlparse_parsable),
|
6579 |
+
"sqlglot_equivalence": float(
|
6580 |
+
sqlglot_parsed_queries_equivalent(predicted_sql, gold_sql)
|
6581 |
+
if is_sqlglot_parsable
|
6582 |
+
else 0
|
6583 |
+
),
|
6584 |
+
"sqlglot_optimized_equivalence": float(
|
6585 |
+
sqlglot_optimized_equivalence(predicted_sql, gold_sql)
|
6586 |
+
if is_sqlglot_parsable
|
6587 |
+
else 0
|
6588 |
+
),
|
6589 |
+
"sqlparse_equivalence": float(
|
6590 |
+
sqlparse_queries_equivalent(predicted_sql, gold_sql)
|
6591 |
+
if is_sqlparse_parsable
|
6592 |
+
else 0
|
6593 |
+
),
|
6594 |
+
"sql_exact_match": float(sql_exact_match(predicted_sql, gold_sql)),
|
6595 |
+
}
|
6596 |
+
logger.debug(f"SQL Non Execution Accuracy Result: {result}")
|
6597 |
result["score"] = result[self.main_score]
|
6598 |
result["score_name"] = self.main_score
|
6599 |
return result
|
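Note: a minimal standalone sketch of the prediction cleanup that SQLNonExecutionAccuracy applies before parsing; the helper name is illustrative and not part of unitxt.

```python
def extract_first_select(prediction: str) -> str:
    # Keep text from the first SELECT onward, then stop at the first ';',
    # mirroring the cleanup step in SQLNonExecutionAccuracy.compute above.
    sql = prediction or ""
    if sql.strip():
        if not sql.startswith("SELECT") and "SELECT" in sql:
            sql = sql[sql.find("SELECT"):]
        if ";" in sql:
            sql = sql[: sql.find(";") + 1]
    return sql


print(extract_first_select("Sure, here is the query: SELECT name FROM users; thanks"))
# prints: SELECT name FROM users;
```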
operator.py
CHANGED
@@ -5,7 +5,7 @@ from typing import Any, Dict, Generator, List, Optional, Union
|
|
5 |
from pkg_resources import DistributionNotFound, VersionConflict, require
|
6 |
|
7 |
from .artifact import Artifact
|
8 |
-
from .dataclass import InternalField, NonPositionalField
|
9 |
from .settings_utils import get_constants
|
10 |
from .stream import DynamicStream, EmptyStreamError, MultiStream, Stream
|
11 |
|
@@ -20,6 +20,7 @@ class PackageRequirementsMixin(Artifact):
|
|
20 |
"""Base class used to automatically check for the existence of required Python dependencies for an artifact (e.g., Operator or Metric).
|
21 |
|
22 |
The _requirements_list is either a list of required packages or a dictionary mapping required packages to installation instructions.
|
|
|
23 |
|
24 |
- **List format**: Just specify the package names, optionally with version annotations (e.g., ["torch>=1.2.4", "numpy<1.19"]).
|
25 |
- **Dict format**: Specify package names as keys and installation instructions as values
|
@@ -32,9 +33,13 @@ class PackageRequirementsMixin(Artifact):
|
|
32 |
_requirements_list: Union[List[str], Dict[str, str]] = InternalField(
|
33 |
default_factory=list
|
34 |
)
|
|
|
|
|
|
|
35 |
|
36 |
def prepare(self):
|
37 |
-
self.check_missing_requirements()
|
|
|
38 |
super().prepare()
|
39 |
|
40 |
def check_missing_requirements(self, requirements=None):
|
|
|
5 |
from pkg_resources import DistributionNotFound, VersionConflict, require
|
6 |
|
7 |
from .artifact import Artifact
|
8 |
+
from .dataclass import FinalField, InternalField, NonPositionalField
|
9 |
from .settings_utils import get_constants
|
10 |
from .stream import DynamicStream, EmptyStreamError, MultiStream, Stream
|
11 |
|
|
|
20 |
"""Base class used to automatically check for the existence of required Python dependencies for an artifact (e.g., Operator or Metric).
|
21 |
|
22 |
The _requirements_list is either a list of required packages or a dictionary mapping required packages to installation instructions.
|
23 |
+
The _requirements_list should be set at class-level definition, while requirements can be supplied at instance creation.
|
24 |
|
25 |
- **List format**: Just specify the package names, optionally with version annotations (e.g., ["torch>=1.2.4", "numpy<1.19"]).
|
26 |
- **Dict format**: Specify package names as keys and installation instructions as values
|
|
|
33 |
_requirements_list: Union[List[str], Dict[str, str]] = InternalField(
|
34 |
default_factory=list
|
35 |
)
|
36 |
+
requirements: Union[List[str], Dict[str, str]] = FinalField(
|
37 |
+
also_positional=False, default_factory=list
|
38 |
+
)
|
39 |
|
40 |
def prepare(self):
|
41 |
+
self.check_missing_requirements(self._requirements_list)
|
42 |
+
self.check_missing_requirements(self.requirements)
|
43 |
super().prepare()
|
44 |
|
45 |
def check_missing_requirements(self, requirements=None):
|
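Note: an illustrative sketch of the two `_requirements_list` shapes described in the PackageRequirementsMixin docstring above; both classes are hypothetical, and the new instance-level `requirements` field accepts the same shapes.

```python
from typing import Dict, List


class ListStyleMetric:
    # List format: package names, optionally with version annotations.
    _requirements_list: List[str] = ["torch>=1.2.4", "numpy<1.19"]


class DictStyleMetric:
    # Dict format: package name -> installation instruction shown when it is missing.
    _requirements_list: Dict[str, str] = {
        "func_timeout": "Install it with `pip install func-timeout`.",
    }
```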
operators.py
CHANGED
@@ -1484,28 +1484,32 @@ class IntersectCorrespondingFields(InstanceOperator):
|
|
1484 |
|
1485 |
Assume the instances contain a field of 'labels' and a field with the labels' corresponding 'positions' in the text.
|
1486 |
|
1487 |
-
|
1488 |
-
|
1489 |
-
|
|
|
|
|
1490 |
|
1491 |
would keep only "b" and "f" values in 'labels' field and
|
1492 |
their respective values in the 'position' field.
|
1493 |
(All other fields are not affected)
|
1494 |
|
1495 |
-
|
1496 |
|
1497 |
-
|
1498 |
-
{"label": ["a", "b"],"position": [0,1],"other" : "not"},
|
1499 |
-
{"label": ["a", "c", "d"], "position": [0,1,2], "other" : "relevant"},
|
1500 |
-
{"label": ["a", "b", "f"], "position": [0,1,2], "other" : "field"}
|
1501 |
-
]
|
1502 |
|
1503 |
-
|
1504 |
-
|
1505 |
-
{"label": ["
|
1506 |
-
{"label": [], "position": [], "other" : "
|
1507 |
-
|
1508 |
-
|
|
|
|
|
1509 |
|
1510 |
Args:
|
1511 |
field - the field to intersected (must contain list values)
|
@@ -2367,21 +2371,23 @@ class CollateInstancesByField(StreamOperator):
|
|
2367 |
Example:
|
2368 |
Collate the instances based on field "category" and aggregate fields "value" and "id".
|
2369 |
|
2370 |
-
|
2371 |
|
2372 |
-
|
2373 |
-
[
|
2374 |
-
{"id": 1, "category": "A", "value": 10", "flag" : True},
|
2375 |
-
{"id": 2, "category": "B", "value": 20", "flag" : False},
|
2376 |
-
{"id": 3, "category": "A", "value": 30", "flag" : True},
|
2377 |
-
{"id": 4, "category": "B", "value": 40", "flag" : False}
|
2378 |
-
]
|
2379 |
|
2380 |
-
|
2381 |
-
|
2382 |
-
|
2383 |
-
|
2384 |
-
|
|
|
|
|
2385 |
|
2386 |
Note that the "flag" field is not aggregated, and must be the same
|
2387 |
in all instances in the same category, or an error is raised.
|
@@ -2462,3 +2468,14 @@ class WikipediaFetcher(FieldOperator):
|
|
2462 |
page = self.wikipedia.page(title)
|
2463 |
|
2464 |
return {"title": page.title, "body": getattr(page, self.mode)}
|
|
|
|
1484 |
|
1485 |
Assume the instances contain a field of 'labels' and a field with the labels' corresponding 'positions' in the text.
|
1486 |
|
1487 |
+
.. code-block:: text
|
1488 |
+
|
1489 |
+
IntersectCorrespondingFields(field="label",
|
1490 |
+
allowed_values=["b", "f"],
|
1491 |
+
corresponding_fields_to_intersect=["position"])
|
1492 |
|
1493 |
would keep only "b" and "f" values in 'labels' field and
|
1494 |
their respective values in the 'position' field.
|
1495 |
(All other fields are not affected)
|
1496 |
|
1497 |
+
.. code-block:: text
|
1498 |
|
1499 |
+
Given this input:
|
|
|
|
|
|
|
|
|
1500 |
|
1501 |
+
[
|
1502 |
+
{"label": ["a", "b"],"position": [0,1],"other" : "not"},
|
1503 |
+
{"label": ["a", "c", "d"], "position": [0,1,2], "other" : "relevant"},
|
1504 |
+
{"label": ["a", "b", "f"], "position": [0,1,2], "other" : "field"}
|
1505 |
+
]
|
1506 |
+
|
1507 |
+
So the output would be:
|
1508 |
+
[
|
1509 |
+
{"label": ["b"], "position":[1],"other" : "not"},
|
1510 |
+
{"label": [], "position": [], "other" : "relevant"},
|
1511 |
+
{"label": ["b", "f"],"position": [1,2], "other" : "field"},
|
1512 |
+
]
|
1513 |
|
1514 |
Args:
|
1515 |
field - the field to intersected (must contain list values)
|
|
|
2371 |
Example:
|
2372 |
Collate the instances based on field "category" and aggregate fields "value" and "id".
|
2373 |
|
2374 |
+
.. code-block:: text
|
2375 |
|
2376 |
+
CollateInstancesByField(by_field="category", aggregate_fields=["value", "id"])
|
|
|
|
2377 |
|
2378 |
+
given input:
|
2379 |
+
[
|
2380 |
+
{"id": 1, "category": "A", "value": 10", "flag" : True},
|
2381 |
+
{"id": 2, "category": "B", "value": 20", "flag" : False},
|
2382 |
+
{"id": 3, "category": "A", "value": 30", "flag" : True},
|
2383 |
+
{"id": 4, "category": "B", "value": 40", "flag" : False}
|
2384 |
+
]
|
2385 |
+
|
2386 |
+
the output is:
|
2387 |
+
[
|
2388 |
+
{"category": "A", "id": [1, 3], "value": [10, 30], "info": True},
|
2389 |
+
{"category": "B", "id": [2, 4], "value": [20, 40], "info": False}
|
2390 |
+
]
|
2391 |
|
2392 |
Note that the "flag" field is not aggregated, and must be the same
|
2393 |
in all instances in the same category, or an error is raised.
|
|
|
2468 |
page = self.wikipedia.page(title)
|
2469 |
|
2470 |
return {"title": page.title, "body": getattr(page, self.mode)}
|
2471 |
+
|
2472 |
+
class Fillna(FieldOperator):
|
2473 |
+
value: Any
|
2474 |
+
def process_value(self, value: Any) -> Any:
|
2475 |
+
import numpy as np
|
2476 |
+
try:
|
2477 |
+
if np.isnan(value):
|
2478 |
+
return self.value
|
2479 |
+
except TypeError:
|
2480 |
+
return value
|
2481 |
+
return value
|
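Note: a standalone sketch of the new Fillna operator's value handling, assuming only numpy; the helper name is illustrative.

```python
import numpy as np


# Mirror of Fillna.process_value from the diff above: NaN scalars are replaced,
# anything np.isnan cannot handle (strings, lists, ...) passes through unchanged.
def fillna_value(value, fill):
    try:
        if np.isnan(value):
            return fill
    except TypeError:
        return value
    return value


print(fillna_value(float("nan"), 0))  # -> 0
print(fillna_value("text", 0))        # -> "text"
print(fillna_value(2.5, 0))           # -> 2.5
```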
serializers.py
CHANGED
@@ -1,12 +1,13 @@
|
|
1 |
import csv
|
2 |
import io
|
|
|
3 |
from abc import abstractmethod
|
4 |
from typing import Any, Dict, List, Union
|
5 |
|
6 |
from .dataclass import AbstractField, Field
|
7 |
-
from .db_utils import get_db_connector
|
8 |
from .operators import InstanceFieldOperator
|
9 |
from .settings_utils import get_constants
|
|
|
10 |
from .type_utils import isoftype, to_type_string
|
11 |
from .types import (
|
12 |
Dialog,
|
@@ -61,6 +62,13 @@ class ListSerializer(SingleTypeSerializer):
|
|
61 |
return ", ".join(str(item) for item in value)
|
62 |
|
63 |
|
|
|
|
64 |
class DialogSerializer(SingleTypeSerializer):
|
65 |
serialized_type = Dialog
|
66 |
|
|
|
1 |
import csv
|
2 |
import io
|
3 |
+
import json
|
4 |
from abc import abstractmethod
|
5 |
from typing import Any, Dict, List, Union
|
6 |
|
7 |
from .dataclass import AbstractField, Field
|
|
|
8 |
from .operators import InstanceFieldOperator
|
9 |
from .settings_utils import get_constants
|
10 |
+
from .sql_utils import get_db_connector
|
11 |
from .type_utils import isoftype, to_type_string
|
12 |
from .types import (
|
13 |
Dialog,
|
|
|
62 |
return ", ".join(str(item) for item in value)
|
63 |
|
64 |
|
65 |
+
class DictAsJsonSerializer(SingleTypeSerializer):
|
66 |
+
serialized_type = dict
|
67 |
+
|
68 |
+
def serialize(self, value: Any, instance: Dict[str, Any]) -> str:
|
69 |
+
return json.dumps(value)
|
70 |
+
|
71 |
+
|
72 |
class DialogSerializer(SingleTypeSerializer):
|
73 |
serialized_type = Dialog
|
74 |
|
settings_utils.py
CHANGED
@@ -151,11 +151,14 @@ if Settings.is_uninitilized():
|
|
151 |
settings.mock_inference_mode = (bool, False)
|
152 |
settings.disable_hf_datasets_cache = (bool, False)
|
153 |
settings.stream_hf_datasets_by_default = (bool, False)
|
154 |
-
|
155 |
-
settings.
|
156 |
settings.task_data_as_text = (bool, True)
|
157 |
settings.default_provider = "watsonx"
|
158 |
settings.default_format = None
|
|
|
|
|
|
|
159 |
|
160 |
if Constants.is_uninitilized():
|
161 |
constants = Constants()
|
|
|
151 |
settings.mock_inference_mode = (bool, False)
|
152 |
settings.disable_hf_datasets_cache = (bool, False)
|
153 |
settings.stream_hf_datasets_by_default = (bool, False)
|
154 |
+
settings.loader_cache_size = (int, 25)
|
155 |
+
settings.loaders_max_retries = (int, 10)
|
156 |
settings.task_data_as_text = (bool, True)
|
157 |
settings.default_provider = "watsonx"
|
158 |
settings.default_format = None
|
159 |
+
settings.hf_offline_datasets_path = None
|
160 |
+
settings.hf_offline_metrics_path = None
|
161 |
+
settings.hf_offline_models_path = None
|
162 |
|
163 |
if Constants.is_uninitilized():
|
164 |
constants = Constants()
|
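Note: a minimal sketch of overriding the new settings at runtime; it assumes the package is importable as `unitxt`, that settings accept direct attribute assignment, and the paths shown are placeholders rather than shipped defaults.

```python
from unitxt.settings_utils import get_settings

settings = get_settings()
settings.loaders_max_retries = 3                     # default per the diff: 10
settings.loader_cache_size = 50                      # default per the diff: 25
settings.hf_offline_models_path = "/data/hf_models"  # placeholder offline mirror
print(settings.loader_cache_size, settings.loaders_max_retries)
```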
db_utils.py → sql_utils.py
RENAMED
@@ -1,5 +1,6 @@
|
|
1 |
import glob
|
2 |
import os
|
|
|
3 |
import sqlite3
|
4 |
import time
|
5 |
from abc import ABC, abstractmethod
|
@@ -47,10 +48,10 @@ def execute_query_local(db_path: str, query: str) -> Any:
|
|
47 |
conn = sqlite3.connect(db_path)
|
48 |
cursor = conn.cursor()
|
49 |
cursor.execute(query)
|
50 |
-
return cursor.fetchall()
|
51 |
except sqlite3.Error as e:
|
52 |
logger.info(f"Error executing SQL: {e}")
|
53 |
-
return None
|
54 |
finally:
|
55 |
if conn:
|
56 |
conn.close()
|
@@ -178,10 +179,10 @@ class InMemoryDatabaseConnector(DatabaseConnector):
|
|
178 |
|
179 |
try:
|
180 |
cursor.execute(query)
|
181 |
-
return cursor.fetchall()
|
182 |
except sqlite3.Error as e:
|
183 |
logger.info(f"Error executing SQL: {e}")
|
184 |
-
return None
|
185 |
finally:
|
186 |
conn.close()
|
187 |
|
@@ -196,7 +197,7 @@ def execute_query_remote(
|
|
196 |
max_retries: int = 3,
|
197 |
retry_delay: int = 5, # seconds
|
198 |
timeout: int = 30, # seconds
|
199 |
-
) -> Optional[dict]:
|
200 |
"""Executes a query against the remote database, with retries for certain exceptions."""
|
201 |
headers = {
|
202 |
"Content-Type": "application/json",
|
@@ -210,11 +211,11 @@ def execute_query_remote(
|
|
210 |
f"{api_url}/sql",
|
211 |
headers=headers,
|
212 |
json={"sql": query, "dataSourceId": database_id},
|
213 |
-
verify=
|
214 |
timeout=timeout,
|
215 |
)
|
216 |
response.raise_for_status()
|
217 |
-
return response.json()
|
218 |
|
219 |
except retryable_exceptions as e:
|
220 |
retries += 1
|
@@ -225,7 +226,10 @@ def execute_query_remote(
|
|
225 |
time.sleep(retry_delay)
|
226 |
else:
|
227 |
logger.error(f"Max retries ({max_retries}) exceeded for query: {query}")
|
228 |
-
return
|
|
|
|
|
|
|
229 |
|
230 |
except requests.exceptions.HTTPError as e:
|
231 |
if e.response.status_code >= 500:
|
@@ -239,16 +243,22 @@ def execute_query_remote(
|
|
239 |
logger.error(
|
240 |
f"Max retries ({max_retries}) exceeded for query: {query}"
|
241 |
)
|
242 |
-
return
|
|
|
|
|
|
|
243 |
else:
|
244 |
logger.error(f"HTTP Error on attempt {retries}: {e}")
|
245 |
-
return
|
|
|
|
|
|
|
246 |
|
247 |
except Exception as e:
|
248 |
logger.error(f"Unexpected error on attempt {retries}: {e}")
|
249 |
-
return None
|
250 |
|
251 |
-
return None
|
252 |
|
253 |
|
254 |
class RemoteDatabaseConnector(DatabaseConnector):
|
@@ -288,11 +298,11 @@ class RemoteDatabaseConnector(DatabaseConnector):
|
|
288 |
self,
|
289 |
) -> str:
|
290 |
"""Retrieves the schema of a database."""
|
291 |
-
cur_api_url = f"{self.api_url}/
|
292 |
response = requests.get(
|
293 |
cur_api_url,
|
294 |
headers=self.headers,
|
295 |
-
verify=
|
296 |
timeout=self.timeout,
|
297 |
)
|
298 |
if response.status_code == 200:
|
@@ -330,3 +340,220 @@ def get_db_connector(db_type: str):
|
|
330 |
raise ValueError(f"Unsupported database type: {db_type}")
|
331 |
|
332 |
return connector
|
|
|
|
|
|
|
1 |
import glob
|
2 |
import os
|
3 |
+
import re
|
4 |
import sqlite3
|
5 |
import time
|
6 |
from abc import ABC, abstractmethod
|
|
|
48 |
conn = sqlite3.connect(db_path)
|
49 |
cursor = conn.cursor()
|
50 |
cursor.execute(query)
|
51 |
+
return cursor.fetchall(), None
|
52 |
except sqlite3.Error as e:
|
53 |
logger.info(f"Error executing SQL: {e}")
|
54 |
+
return None, f"Error executing SQL: {e}"
|
55 |
finally:
|
56 |
if conn:
|
57 |
conn.close()
|
|
|
179 |
|
180 |
try:
|
181 |
cursor.execute(query)
|
182 |
+
return cursor.fetchall(), None
|
183 |
except sqlite3.Error as e:
|
184 |
logger.info(f"Error executing SQL: {e}")
|
185 |
+
return None, f"Error executing SQL: {e}"
|
186 |
finally:
|
187 |
conn.close()
|
188 |
|
|
|
197 |
max_retries: int = 3,
|
198 |
retry_delay: int = 5, # seconds
|
199 |
timeout: int = 30, # seconds
|
200 |
+
) -> (Optional[dict], str):
|
201 |
"""Executes a query against the remote database, with retries for certain exceptions."""
|
202 |
headers = {
|
203 |
"Content-Type": "application/json",
|
|
|
211 |
f"{api_url}/sql",
|
212 |
headers=headers,
|
213 |
json={"sql": query, "dataSourceId": database_id},
|
214 |
+
verify=False,
|
215 |
timeout=timeout,
|
216 |
)
|
217 |
response.raise_for_status()
|
218 |
+
return response.json(), None
|
219 |
|
220 |
except retryable_exceptions as e:
|
221 |
retries += 1
|
|
|
226 |
time.sleep(retry_delay)
|
227 |
else:
|
228 |
logger.error(f"Max retries ({max_retries}) exceeded for query: {query}")
|
229 |
+
return (
|
230 |
+
None,
|
231 |
+
f"Max retries ({max_retries}) exceeded for query: {query} - Error: {e!s}",
|
232 |
+
)
|
233 |
|
234 |
except requests.exceptions.HTTPError as e:
|
235 |
if e.response.status_code >= 500:
|
|
|
243 |
logger.error(
|
244 |
f"Max retries ({max_retries}) exceeded for query: {query}"
|
245 |
)
|
246 |
+
return (
|
247 |
+
None,
|
248 |
+
f"Max retries ({max_retries}) exceeded for query: {query} - Error: {e!s}",
|
249 |
+
)
|
250 |
else:
|
251 |
logger.error(f"HTTP Error on attempt {retries}: {e}")
|
252 |
+
return (
|
253 |
+
None,
|
254 |
+
f"HTTP Error on attempt {retries}: {e}",
|
255 |
+
)
|
256 |
|
257 |
except Exception as e:
|
258 |
logger.error(f"Unexpected error on attempt {retries}: {e}")
|
259 |
+
return (None, f"Unexpected error on attempt {retries}: {e}")
|
260 |
|
261 |
+
return None, "Unknown Error in SQL execution"
|
262 |
|
263 |
|
264 |
class RemoteDatabaseConnector(DatabaseConnector):
|
|
|
298 |
self,
|
299 |
) -> str:
|
300 |
"""Retrieves the schema of a database."""
|
301 |
+
cur_api_url = f"{self.api_url}/datasources/{self.database_id}"
|
302 |
response = requests.get(
|
303 |
cur_api_url,
|
304 |
headers=self.headers,
|
305 |
+
verify=False,
|
306 |
timeout=self.timeout,
|
307 |
)
|
308 |
if response.status_code == 200:
|
|
|
340 |
raise ValueError(f"Unsupported database type: {db_type}")
|
341 |
|
342 |
return connector
|
343 |
+
|
344 |
+
|
345 |
+
def is_sqlglot_parsable(sql: str, db_type="sqlite") -> bool:
|
346 |
+
"""Returns True if sqlglot does not encounter any error, False otherwise."""
|
347 |
+
from sqlglot import parse
|
348 |
+
|
349 |
+
if not sql.strip():
|
350 |
+
return False
|
351 |
+
if db_type == "db2":
|
352 |
+
db_type = "postgres" ## TODO: temporary until sqlglot adds support for db2
|
353 |
+
try:
|
354 |
+
parse(sql, read=db_type)
|
355 |
+
return True
|
356 |
+
except Exception as e:
|
357 |
+
logger.debug(f"SQL query could not parse: {e}")
|
358 |
+
return False
|
359 |
+
|
360 |
+
|
361 |
+
def is_sqlparse_parsable(sql: str) -> bool:
|
362 |
+
"""Returns True if sqlparse does not encounter any error, False otherwise."""
|
363 |
+
from sqlparse import parse
|
364 |
+
from sqlparse.tokens import Error
|
365 |
+
|
366 |
+
if not sql.strip():
|
367 |
+
return False
|
368 |
+
try:
|
369 |
+
statements = parse(sql)
|
370 |
+
for statement in statements:
|
371 |
+
for token in statement.tokens:
|
372 |
+
if token.ttype == Error:
|
373 |
+
return False
|
374 |
+
return True
|
375 |
+
except Exception as e:
|
376 |
+
logger.debug(f"SQL query could not parse: {e}")
|
377 |
+
return False
|
378 |
+
|
379 |
+
|
380 |
+
def sqlglot_optimized_equivalence(expected: str, generated: str) -> int:
|
381 |
+
"""Checks if SQL queries are equivalent using SQLGlot parsing, so we don't run them."""
|
382 |
+
from sqlglot import diff, parse_one
|
383 |
+
from sqlglot.optimizer import optimize
|
384 |
+
|
385 |
+
try:
|
386 |
+
t_diff = diff(
|
387 |
+
optimize(parse_one(expected.lower()).sql(pretty=True)),
|
388 |
+
optimize(parse_one(generated.lower()).sql(pretty=True)),
|
389 |
+
)
|
390 |
+
sql_diff = sum(0 if (e.__class__.__name__ == "Keep") else 1 for e in t_diff)
|
391 |
+
|
392 |
+
return 1 if sql_diff == 0 else 0
|
393 |
+
except Exception as e:
|
394 |
+
logger.debug(f"Error parsing SQL for comparison: {e}")
|
395 |
+
return False
|
396 |
+
|
397 |
+
|
398 |
+
def extract_select_columns(statement):
|
399 |
+
"""Parse SQL using sqlparse and extract columns."""
|
400 |
+
from sqlparse.sql import Identifier, IdentifierList
|
401 |
+
from sqlparse.tokens import DML, Keyword
|
402 |
+
|
403 |
+
columns = []
|
404 |
+
select_seen = False
|
405 |
+
for token in statement.tokens:
|
406 |
+
if token.ttype is DML and token.value.upper() == "SELECT":
|
407 |
+
select_seen = True
|
408 |
+
continue
|
409 |
+
if select_seen:
|
410 |
+
if token.ttype is Keyword and token.value.upper() in (
|
411 |
+
"FROM",
|
412 |
+
"WHERE",
|
413 |
+
"GROUP",
|
414 |
+
"HAVING",
|
415 |
+
"ORDER",
|
416 |
+
"LIMIT",
|
417 |
+
):
|
418 |
+
break
|
419 |
+
if isinstance(token, IdentifierList):
|
420 |
+
for identifier in token.get_identifiers():
|
421 |
+
columns.append(strip_alias(identifier.value))
|
422 |
+
elif isinstance(token, Identifier):
|
423 |
+
columns.append(strip_alias(token.value))
|
424 |
+
else:
|
425 |
+
val = token.value.strip()
|
426 |
+
if val:
|
427 |
+
columns.append(strip_alias(val))
|
428 |
+
return frozenset(columns)
|
429 |
+
|
430 |
+
|
431 |
+
def strip_alias(col: str) -> str:
|
432 |
+
"""Remove any AS alias from a column."""
|
433 |
+
col = col.strip()
|
434 |
+
upper = col.upper()
|
435 |
+
if " AS " in upper:
|
436 |
+
return col[: upper.index(" AS ")].strip()
|
437 |
+
parts_alias = col.split()
|
438 |
+
if len(parts_alias) > 1:
|
439 |
+
return " ".join(parts_alias[:-1])
|
440 |
+
return col
|
441 |
+
|
442 |
+
|
443 |
+
def collect_clause(statement, clause_keyword):
|
444 |
+
"""Parse SQL statement and collect clauses."""
|
445 |
+
from sqlparse.tokens import Keyword
|
446 |
+
|
447 |
+
found = False
|
448 |
+
collected = []
|
449 |
+
for token in statement.tokens:
|
450 |
+
tvalue = token.value.upper()
|
451 |
+
if token.ttype is Keyword:
|
452 |
+
if tvalue.startswith(clause_keyword):
|
453 |
+
found = True
|
454 |
+
continue
|
455 |
+
if found and tvalue in (
|
456 |
+
"FROM",
|
457 |
+
"WHERE",
|
458 |
+
"GROUP",
|
459 |
+
"HAVING",
|
460 |
+
"ORDER",
|
461 |
+
"LIMIT",
|
462 |
+
):
|
463 |
+
break
|
464 |
+
if found:
|
465 |
+
collected.append(token.value)
|
466 |
+
return " ".join(collected).strip()
|
467 |
+
|
468 |
+
|
469 |
+
def extract_select_info(sql: str):
|
470 |
+
"""Parse SQL using sqlparse and return a dict of extracted columns and clauses."""
|
471 |
+
from sqlparse import parse
|
472 |
+
from sqlparse.tokens import DML
|
473 |
+
|
474 |
+
statements = parse(sql)
|
475 |
+
if len(statements) != 1:
|
476 |
+
return None
|
477 |
+
stmt = statements[0]
|
478 |
+
if not any(t.ttype is DML and t.value.upper() == "SELECT" for t in stmt.tokens):
|
479 |
+
return None
|
480 |
+
parts = {
|
481 |
+
"columns": None,
|
482 |
+
"from": "",
|
483 |
+
"where": "",
|
484 |
+
"group": "",
|
485 |
+
"having": "",
|
486 |
+
"order": "",
|
487 |
+
}
|
488 |
+
columns = extract_select_columns(stmt)
|
489 |
+
if not columns:
|
490 |
+
columns = frozenset()
|
491 |
+
parts["columns"] = columns
|
492 |
+
parts["from"] = collect_clause(stmt, "FROM")
|
493 |
+
parts["where"] = collect_clause(stmt, "WHERE")
|
494 |
+
parts["group"] = collect_clause(stmt, "GROUP")
|
495 |
+
parts["having"] = collect_clause(stmt, "HAVING")
|
496 |
+
parts["order"] = collect_clause(stmt, "ORDER")
|
497 |
+
return parts
|
498 |
+
|
499 |
+
|
500 |
+
def sqlparse_queries_equivalent(sql1: str, sql2: str) -> bool:
|
501 |
+
"""Return True if both SQL queries are naively considered equivalent."""
|
502 |
+
try:
|
503 |
+
info1 = extract_select_info(sql1)
|
504 |
+
info2 = extract_select_info(sql2)
|
505 |
+
if not info1 or not info2:
|
506 |
+
return False
|
507 |
+
if info1["columns"] != info2["columns"]:
|
508 |
+
return False
|
509 |
+
for k in ["from", "where", "group", "having", "order"]:
|
510 |
+
if info1[k].replace(" ", "").upper() != info2[k].replace(" ", "").upper():
|
511 |
+
return False
|
512 |
+
return True
|
513 |
+
except Exception as e:
|
514 |
+
logger.debug(f"Errpr parsing SQL query for comparison: {e}")
|
515 |
+
return False
|
516 |
+
|
517 |
+
|
518 |
+
def sqlglot_parsed_queries_equivalent(sql1: str, sql2: str, dialect: str = "") -> bool:
|
519 |
+
from sqlglot import exp, parse_one
|
520 |
+
|
521 |
+
try:
|
522 |
+
ast1 = parse_one(sql1, read=dialect)
|
523 |
+
ast2 = parse_one(sql2, read=dialect)
|
524 |
+
except:
|
525 |
+
return False
|
526 |
+
if not (isinstance(ast1, exp.Select) and isinstance(ast2, exp.Select)):
|
527 |
+
return False
|
528 |
+
|
529 |
+
def normalized_select_columns(select_expr: exp.Select):
|
530 |
+
cols = []
|
531 |
+
for item in select_expr.expressions:
|
532 |
+
copy_item = item.copy()
|
533 |
+
copy_item.set("alias", None)
|
534 |
+
cols.append(copy_item.sql(dialect=dialect, normalize=True))
|
535 |
+
return frozenset(cols)
|
536 |
+
|
537 |
+
if normalized_select_columns(ast1) != normalized_select_columns(ast2):
|
538 |
+
return False
|
539 |
+
|
540 |
+
def normalized_clause(expr: exp.Expression, key: str):
|
541 |
+
clause = expr.args.get(key)
|
542 |
+
return clause.sql(dialect=dialect, normalize=True) if clause else ""
|
543 |
+
|
544 |
+
for clause_key in ("from", "where", "group", "having", "order"):
|
545 |
+
if normalized_clause(ast1, clause_key) != normalized_clause(ast2, clause_key):
|
546 |
+
return False
|
547 |
+
|
548 |
+
return True
|
549 |
+
|
550 |
+
|
551 |
+
def sql_exact_match(sql1: str, sql2: str) -> bool:
|
552 |
+
"""Return True if two SQL strings match after very basic normalization."""
|
553 |
+
|
554 |
+
def normalize_sql(s: str) -> str:
|
555 |
+
s = s.strip().rstrip(";")
|
556 |
+
s = re.sub(r"\s+", " ", s)
|
557 |
+
return s.upper()
|
558 |
+
|
559 |
+
return normalize_sql(sql1) == normalize_sql(sql2)
|
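Note: a quick usage sketch of the new parsing helpers; it assumes `sqlglot` and `sqlparse` are installed and that the module is importable as `unitxt.sql_utils` after the rename above.

```python
from unitxt.sql_utils import (
    is_sqlglot_parsable,
    is_sqlparse_parsable,
    sql_exact_match,
    sqlparse_queries_equivalent,
)

gold = "SELECT name, age FROM users WHERE age > 30 ORDER BY age"
pred = "SELECT name, age FROM users WHERE age > 30 ORDER BY age;"

print(is_sqlglot_parsable(pred))               # can sqlglot parse the prediction?
print(is_sqlparse_parsable(pred))              # does sqlparse see no error tokens?
print(sql_exact_match(gold, pred))             # case/whitespace-normalized exact match
print(sqlparse_queries_equivalent(gold, pred)) # naive clause-by-clause comparison
```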
standard.py
CHANGED
@@ -27,7 +27,12 @@ from .splitters import ConstantSizeSample, RandomSizeSample, Sampler
|
|
27 |
from .stream import MultiStream
|
28 |
from .system_prompts import EmptySystemPrompt, SystemPrompt
|
29 |
from .task import Task
|
30 |
-
from .templates import
|
|
|
|
|
|
|
|
|
|
|
31 |
from .type_utils import isoftype
|
32 |
from .utils import LRUCache, recursive_copy
|
33 |
|
@@ -658,35 +663,31 @@ class DatasetRecipe(SourceSequentialOperator):
|
|
658 |
|
659 |
self.finalize.steps.append(FinalizeDataset(group_by=self.group_by))
|
660 |
|
|
|
|
661 |
def prepare(self):
|
662 |
assert (
|
663 |
self.template_card_index is None or self.template is None
|
664 |
), f"Specify either template ({self.template}) or template_card_index ({self.template_card_index}) but not both"
|
665 |
|
666 |
-
if self.
|
667 |
-
|
668 |
-
|
669 |
-
|
|
|
|
|
|
|
|
|
|
|
670 |
else:
|
671 |
self.template = self.card.task.default_template
|
672 |
|
673 |
-
# Than try to infer the default
|
674 |
-
if self.template is None:
|
675 |
-
if (
|
676 |
-
self.card is not None
|
677 |
-
and self.card.templates is not None
|
678 |
-
and len(self.card.templates) > 0
|
679 |
-
):
|
680 |
-
self.template_card_index = (
|
681 |
-
0
|
682 |
-
if isinstance(self.card.templates, list)
|
683 |
-
else next(iter(self.card.templates.keys()))
|
684 |
-
)
|
685 |
-
logger.warning(
|
686 |
-
"Template was not specified in recipe, using the first template from the card by default."
|
687 |
-
)
|
688 |
-
else:
|
689 |
-
self.template = self.card.task.default_template
|
690 |
|
691 |
if self.template is None and self.template_card_index is not None:
|
692 |
try:
|
@@ -704,6 +705,7 @@ class DatasetRecipe(SourceSequentialOperator):
|
|
704 |
raise ValueError(
|
705 |
"No template was specified in the the 'template' or 'template_card_index' recipe arguments, and no default templates are defined the card or task"
|
706 |
)
|
|
|
707 |
if self.use_demos:
|
708 |
assert (
|
709 |
self.demos_pool is not None
|
@@ -726,6 +728,7 @@ class DatasetRecipe(SourceSequentialOperator):
|
|
726 |
|
727 |
if isinstance(self.template, TemplatesList):
|
728 |
self.template = self.template.items
|
|
|
729 |
self.reset_pipeline()
|
730 |
|
731 |
|
|
|
27 |
from .stream import MultiStream
|
28 |
from .system_prompts import EmptySystemPrompt, SystemPrompt
|
29 |
from .task import Task
|
30 |
+
from .templates import (
|
31 |
+
ApplyRandomTemplate,
|
32 |
+
ApplySingleTemplate,
|
33 |
+
Template,
|
34 |
+
TemplatesList,
|
35 |
+
)
|
36 |
from .type_utils import isoftype
|
37 |
from .utils import LRUCache, recursive_copy
|
38 |
|
|
|
663 |
|
664 |
self.finalize.steps.append(FinalizeDataset(group_by=self.group_by))
|
665 |
|
666 |
+
@property
|
667 |
+
def has_card_templates(self):
|
668 |
+
return self.card is not None and self.card.templates is not None and len(self.card.templates) > 0
|
669 |
+
|
670 |
+
@property
|
671 |
+
def has_no_templates(self):
|
672 |
+
return self.template_card_index is None and self.template is None
|
673 |
+
|
674 |
def prepare(self):
|
675 |
assert (
|
676 |
self.template_card_index is None or self.template is None
|
677 |
), f"Specify either template ({self.template}) or template_card_index ({self.template_card_index}) but not both"
|
678 |
|
679 |
+
if self.has_no_templates:
|
680 |
+
if self.has_card_templates:
|
681 |
+
if isinstance(self.card.templates, list):
|
682 |
+
self.template_card_index = 0
|
683 |
+
else:
|
684 |
+
self.template_card_index = next(iter(self.card.templates.keys()))
|
685 |
+
logger.warning(
|
686 |
+
"Template was not specified in recipe, using the first template from the card by default."
|
687 |
+
)
|
688 |
else:
|
689 |
self.template = self.card.task.default_template
|
690 |
|
|
|
|
|
691 |
|
692 |
if self.template is None and self.template_card_index is not None:
|
693 |
try:
|
|
|
705 |
raise ValueError(
|
706 |
"No template was specified in the the 'template' or 'template_card_index' recipe arguments, and no default templates are defined the card or task"
|
707 |
)
|
708 |
+
|
709 |
if self.use_demos:
|
710 |
assert (
|
711 |
self.demos_pool is not None
|
|
|
728 |
|
729 |
if isinstance(self.template, TemplatesList):
|
730 |
self.template = self.template.items
|
731 |
+
|
732 |
self.reset_pipeline()
|
733 |
|
734 |
|
string_operators.py
CHANGED
@@ -1,3 +1,4 @@
|
|
|
|
1 |
import re
|
2 |
from typing import (
|
3 |
Any,
|
@@ -7,7 +8,9 @@ from typing import (
|
|
7 |
)
|
8 |
|
9 |
from .operators import FieldOperator, InstanceOperator
|
|
|
10 |
|
|
|
11 |
|
12 |
class Split(FieldOperator):
|
13 |
by: str
|
@@ -30,8 +33,10 @@ class TokensSplit(FieldOperator):
|
|
30 |
def prepare(self):
|
31 |
super().prepare()
|
32 |
from transformers import AutoTokenizer
|
33 |
-
|
34 |
-
|
|
|
|
|
35 |
|
36 |
def process_value(self, value: str) -> List[str]:
|
37 |
return self.tokenizer.tokenize(value)
|
@@ -48,8 +53,10 @@ class TokensSlice(FieldOperator):
|
|
48 |
def prepare(self):
|
49 |
super().prepare()
|
50 |
from transformers import AutoTokenizer
|
51 |
-
|
52 |
-
|
|
|
|
|
53 |
|
54 |
def process_value(self, value: str) -> str:
|
55 |
encoded = self.tokenizer.encode(value)
|
|
|
1 |
+
import os
|
2 |
import re
|
3 |
from typing import (
|
4 |
Any,
|
|
|
8 |
)
|
9 |
|
10 |
from .operators import FieldOperator, InstanceOperator
|
11 |
+
from .settings_utils import get_settings
|
12 |
|
13 |
+
settings = get_settings()
|
14 |
|
15 |
class Split(FieldOperator):
|
16 |
by: str
|
|
|
33 |
def prepare(self):
|
34 |
super().prepare()
|
35 |
from transformers import AutoTokenizer
|
36 |
+
path = self.model
|
37 |
+
if settings.hf_offline_models_path is not None:
|
38 |
+
path = os.path.join(settings.hf_offline_models_path, path)
|
39 |
+
self.tokenizer = AutoTokenizer.from_pretrained(path)
|
40 |
|
41 |
def process_value(self, value: str) -> List[str]:
|
42 |
return self.tokenizer.tokenize(value)
|
|
|
53 |
def prepare(self):
|
54 |
super().prepare()
|
55 |
from transformers import AutoTokenizer
|
56 |
+
path = self.model
|
57 |
+
if settings.hf_offline_models_path is not None:
|
58 |
+
path = os.path.join(settings.hf_offline_models_path, path)
|
59 |
+
self.tokenizer = AutoTokenizer.from_pretrained(path)
|
60 |
|
61 |
def process_value(self, value: str) -> str:
|
62 |
encoded = self.tokenizer.encode(value)
|
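Note: a tiny sketch of the offline model-path resolution added to TokensSplit and TokensSlice above; the helper name is mine, not a unitxt API.

```python
import os
from typing import Optional


def resolve_model_path(model_name: str, hf_offline_models_path: Optional[str]) -> str:
    # When an offline models directory is configured, tokenizer names resolve inside it.
    if hf_offline_models_path is not None:
        return os.path.join(hf_offline_models_path, model_name)
    return model_name


print(resolve_model_path("bert-base-uncased", None))
# -> bert-base-uncased
print(resolve_model_path("bert-base-uncased", "/data/hf_models"))
# -> /data/hf_models/bert-base-uncased
```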
struct_data_operators.py
CHANGED
@@ -23,7 +23,6 @@ For key-value pairs, expected input format is:
|
|
23 |
{"key1": "value1", "key2": value2, "key3": "value3"}
|
24 |
"""
|
25 |
|
26 |
-
import ast
|
27 |
import json
|
28 |
import random
|
29 |
from abc import ABC, abstractmethod
|
@@ -43,6 +42,7 @@ from .error_utils import UnitxtWarning
|
|
43 |
from .operators import FieldOperator, InstanceOperator
|
44 |
from .random_utils import new_random_generator
|
45 |
from .serializers import ImageSerializer, TableSerializer
|
|
|
46 |
from .types import Table
|
47 |
from .utils import recursive_copy
|
48 |
|
@@ -1025,16 +1025,21 @@ class ShuffleColumnsNames(TypeDependentAugmentor):
|
|
1025 |
|
1026 |
|
1027 |
class JsonStrToListOfKeyValuePairs(FieldOperator):
|
1028 |
-
|
1029 |
-
text = text.replace("null", "None")
|
1030 |
|
|
|
1031 |
try:
|
1032 |
-
dict_value =
|
1033 |
except Exception as e:
|
1034 |
UnitxtWarning(
|
1035 |
f"Unable to convert input text to json format in JsonStrToListOfKeyValuePairs due to {e}. Text: {text}"
|
1036 |
)
|
1037 |
dict_value = {}
|
|
|
|
1038 |
return [
|
1039 |
(str(key), str(value))
|
1040 |
for key, value in dict_value.items()
|
|
|
23 |
{"key1": "value1", "key2": value2, "key3": "value3"}
|
24 |
"""
|
25 |
|
|
|
26 |
import json
|
27 |
import random
|
28 |
from abc import ABC, abstractmethod
|
|
|
42 |
from .operators import FieldOperator, InstanceOperator
|
43 |
from .random_utils import new_random_generator
|
44 |
from .serializers import ImageSerializer, TableSerializer
|
45 |
+
from .type_utils import isoftype
|
46 |
from .types import Table
|
47 |
from .utils import recursive_copy
|
48 |
|
|
|
1025 |
|
1026 |
|
1027 |
class JsonStrToListOfKeyValuePairs(FieldOperator):
|
1028 |
+
"""Convert a Json string of representing key value as dictionary to list of key value pairs."""
|
|
|
1029 |
|
1030 |
+
def process_value(self, text: str) -> List[Tuple[str, str]]:
|
1031 |
try:
|
1032 |
+
dict_value = json.loads(text)
|
1033 |
except Exception as e:
|
1034 |
UnitxtWarning(
|
1035 |
f"Unable to convert input text to json format in JsonStrToListOfKeyValuePairs due to {e}. Text: {text}"
|
1036 |
)
|
1037 |
dict_value = {}
|
1038 |
+
if not isoftype(dict_value, Dict[str, Any]):
|
1039 |
+
UnitxtWarning(
|
1040 |
+
f"Unable to convert input text to dictionary in JsonStrToListOfKeyValuePairs. Text: {text}"
|
1041 |
+
)
|
1042 |
+
dict_value = {}
|
1043 |
return [
|
1044 |
(str(key), str(value))
|
1045 |
for key, value in dict_value.items()
|
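Note: a standalone sketch mirroring the reworked JsonStrToListOfKeyValuePairs.process_value shown above; the function name is illustrative.

```python
import json
from typing import List, Tuple


def json_str_to_key_value_pairs(text: str) -> List[Tuple[str, str]]:
    # Parse the JSON, falling back to {} when the text is not a string -> value mapping.
    try:
        value = json.loads(text)
    except Exception:
        value = {}
    if not isinstance(value, dict):
        value = {}
    return [(str(k), str(v)) for k, v in value.items()]


print(json_str_to_key_value_pairs('{"capital": "Paris", "population": 67}'))
# -> [('capital', 'Paris'), ('population', '67')]
print(json_str_to_key_value_pairs("[1, 2, 3]"))
# -> []
```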
version.py
CHANGED
@@ -1 +1 @@
|
|
1 |
-
version = "1.
|
|
|
1 |
+
version = "1.19.0"
|