from openai import OpenAI
class Response(BaseModel):
    """Schema for structured LLM output used by the examples below.

    NOTE(review): ``BaseModel`` is not imported in this snippet — presumably
    ``pydantic.BaseModel``; confirm against the surrounding docs.
    """

    # The model's answer text (e.g. "The capital of India is New Delhi.").
    response: str
llm = ragas_llm(provider="openai",model="gpt-4o",client=OpenAI())
llm.generate("What is the capital of India?",response_model=Response) #works fine
try:
await llm.agenerate("What is the capital of India?", response_model=Response)
except TypeError as e:
assert isinstance(e, TypeError)
# gives TypeError: object Response can't be used in 'await' expression

LLMs
ragas_llm
ragas_llm (provider:str, model:str, client:Any, **model_args)
RagasLLM
RagasLLM (provider:str, model:str, client:Any, **model_args)
Initialize a RagasLLM for the given provider and model, wrapping the supplied client; extra model_args are forwarded to the underlying API.
Example Usage
from openai import AsyncOpenAI
llm = ragas_llm(provider="openai",model="gpt-4o",client=AsyncOpenAI())
await llm.agenerate("What is the capital of India?",response_model=Response)Response(response='The capital of India is New Delhi.')
from anthropic import Anthropic

# Anthropic requires max_tokens; it is forwarded via **model_args.
llm = ragas_llm(provider="anthropic", model="claude-3-opus-20240229", client=Anthropic(), max_tokens=1024)
llm.generate("What is the capital of India?", response_model=Response)
# -> Response(response='The capital of India is New Delhi.')