Base class for all metrics in the LLM evaluation library.
Example:
from ragas_experimental.llm import ragas_llm
from openai import OpenAI

# Build an LLM handle for the metric to use.
llm = ragas_llm(provider="openai", model="gpt-4o", client=OpenAI())


@dataclass
class CustomMetric(Metric):
    """Example metric with a fixed set of allowed result values."""

    # Allowed outcome labels for this metric.
    values: t.List[str] = field(default_factory=lambda: ["pass", "fail"])

    def _get_response_model(self, with_reasoning: bool) -> t.Type[BaseModel]:
        """Get or create a response model based on reasoning parameter."""

        class mymodel(BaseModel):
            result: int
            reason: t.Optional[str] = None

        return mymodel

    def _ensemble(self, results: t.List[MetricResult]) -> MetricResult:
        """Combine multiple results into one; here, simply take the first."""
        return results[0]  # Placeholder for ensemble logic


my_metric = CustomMetric(name="example", prompt="What is the result of {input}?", llm=llm)
my_metric.score(input="test")