Skip to content
Open
3 changes: 1 addition & 2 deletions examples/evaluate_existing_dataset_by_llm_as_judge_direct.py
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,7 @@
For the arguments these inference engines can receive, please refer to the classes documentation or read
about the OpenAI API arguments the CrossProviderInferenceEngine follows.
"""
predictions = inference_model.infer(dataset)

predictions = inference_model(dataset)
gold_answers = [d[0] for d in dataset["references"]]

# Evaluate the predictions using the defined metric.
Expand Down
6 changes: 0 additions & 6 deletions src/unitxt/api.py
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
from .error_utils import UnitxtError
from .inference import (
InferenceEngine,
LogProbInferenceEngine,
OptionSelectingByLogProbsInferenceEngine,
)
from .loaders import LoadFromDictionary
Expand Down Expand Up @@ -380,11 +379,6 @@ def add_previous_messages(example, index):
dataset = dataset.map(add_previous_messages, with_indices=True)
engine, _ = fetch_artifact(engine)
if return_log_probs:
if not isinstance(engine, LogProbInferenceEngine):
raise NotImplementedError(
f"Error in infer: return_log_probs set to True but supplied engine "
f"{engine.__class__.__name__} does not support logprobs."
)
infer_outputs = engine.infer_log_probs(dataset, return_meta_data)
raw_predictions = (
[output.prediction for output in infer_outputs]
Expand Down
Loading