from openai import OpenAI
class Response(BaseModel):
    response: str
llm = ragas_llm(provider="openai", model="gpt-4o", client=OpenAI())
llm.generate("What is the capital of India?", response_model=Response)  # works fine
try:
await llm.agenerate("What is the capital of India?", response_model=Response)
except TypeError as e:
assert isinstance(e, TypeError)
#gives TypeError: object Response can't be used in 'await' expression
LLMs
ragas_llm
ragas_llm (provider:str, model:str, client:Any, **model_args)
RagasLLM
RagasLLM (provider:str, model:str, client:Any, **model_args)
Initialize self. See help(type(self)) for accurate signature.
Example Usage
from openai import AsyncOpenAI
llm = ragas_llm(provider="openai", model="gpt-4o", client=AsyncOpenAI())
await llm.agenerate("What is the capital of India?", response_model=Response)
Response(response='The capital of India is New Delhi.')
from anthropic import Anthropic
llm = ragas_llm(provider="anthropic", model="claude-3-opus-20240229", client=Anthropic(), max_tokens=1024)
llm.generate("What is the capital of India?", response_model=Response)
Response(response='The capital of India is New Delhi.')