ValidationError: 1 validation error for RetrievalQA #17418
-
Checked other resources
Commit to Help
Example Code
from langchain.prompts import PromptTemplate
#from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.embeddings.sentence_transformer import (
SentenceTransformerEmbeddings,
)
from langchain_community.vectorstores.faiss import FAISS
from langchain_community.llms.ctransformers import CTransformers
from langchain.chains import RetrievalQA
import chainlit as cl
import warnings
warnings.filterwarnings('ignore')
DB_FAISS_PATH = "vectorstores/db_faiss"
# Prompt used by every RetrievalQA call; {context} and {question} are filled
# in by the chain at query time. (Fixes typos: "dont'" -> "don't",
# "helpul" -> "helpful" — this text is shown to the LLM verbatim.)
custom_prompt_template = """Use the following pieces of information to answer the user's question.
If you don't know the answer, please just say that you don't know the answer.
Context:{context}
Question:{question}
Only return the helpful answer below and nothing else.
Helpful answer:
"""
def set_custom_prompt():
    """Build the PromptTemplate used for QA retrieval on each vector store.

    Returns:
        A PromptTemplate wrapping ``custom_prompt_template`` with the
        ``context`` and ``question`` input variables.
    """
    return PromptTemplate(
        template=custom_prompt_template,
        input_variables=["context", "question"],
    )
def load_llm():
    """Load the local Llama-2 7B chat GGML model via ctransformers.

    Returns:
        A CTransformers LLM configured for chat-style generation.

    Note:
        Generation settings belong in the ``config`` dict, and the correct
        key is ``max_new_tokens``. The original code passed a misspelled
        top-level ``max_new_token`` kwarg, which the pydantic model rejects —
        a likely source of the reported ValidationError.
    """
    llm = CTransformers(
        model="llama-2-7b-chat.ggmlv3.q8_0.bin",
        model_type="llama",
        config={"max_new_tokens": 512, "temperature": 0.5},
    )
    return llm
def retrieval_qa_chain(llm, prompt, db):
    """Assemble a 'stuff'-type RetrievalQA chain over a vector store.

    Args:
        llm: Language model used to generate the answer.
        prompt: PromptTemplate expecting ``context`` and ``question``.
        db: Vector store exposing ``as_retriever``.

    Returns:
        A RetrievalQA chain that also returns the retrieved source documents.
    """
    qa_chain = RetrievalQA.from_chain_type(
        llm=llm,
        chain_type="stuff",
        retriever=db.as_retriever(search_kwargs={"k": 2}),
        # The misspelled ``return_soruce_document`` kwarg is what raised
        # "1 validation error for RetrievalQA" — the real field is plural:
        return_source_documents=True,
        chain_type_kwargs={"prompt": prompt},
    )
    return qa_chain
def qa_bot():
    """Wire embeddings, the FAISS index, the LLM and the prompt into a chain.

    Returns:
        A ready-to-query RetrievalQA chain backed by the local FAISS store.
    """
    # embeddings = HuggingFaceEmbeddings(model='sentence-transformers/all-MiniLM-L6-v2')
    embedding_function = SentenceTransformerEmbeddings(model_name="all-MiniLM-L6-v2")
    # NOTE(review): recent langchain_community releases require
    # allow_dangerous_deserialization=True here — confirm installed version.
    db = FAISS.load_local(DB_FAISS_PATH, embedding_function)
    return retrieval_qa_chain(load_llm(), set_custom_prompt(), db)
def final_result(query):
    """Run a one-off query through a freshly built QA chain.

    Args:
        query: The user's question as a plain string.

    Returns:
        The chain's response dict (answer plus source documents).
    """
    chain = qa_bot()
    return chain({"query": query})
# chainlit code
@cl.on_chat_start
async def start():
    """Create the QA chain for this session and greet the user."""
    chain = qa_bot()
    greeting = cl.Message(content='Starting SPCBot')
    await greeting.send()
    greeting.content = "Hi, ask me anything about SPC"
    await greeting.update()
    # Stash the chain so the message handler can reuse it per session.
    cl.user_session.set("chain", chain)
@cl.on_message
async def main(message: cl.Message):
chain = cl.user_session.get("chain")
cb = cl.AsyncLangchainCallbackHandler(
stream_final_answer=True, answer_prefix_tokens=["FINAL", "ANSWER"]
)
cb.answer_reached = True
res = await chain.acall(message.content, callbacks=[cb])
answer = res["result"]
await cl.Message(content=answer).send() DescriptionIm trying to build a new chat bot. For some reason i alwasy get the error " File "pydantic\main.py", line 341, in pydantic.main.BaseModel.init I tried reinstalling langchain, but still running into the same error any solutions/suggestions? System Infoabsl-py==2.1.0 |
Beta Was this translation helpful? Give feedback.
Replies: 1 comment 2 replies
-
🤖 Hey @siddhant250502! Great to see you diving into new challenges with LangChain. How's everything going on your end? Based on the error message you provided, it seems like there's a typo in your code. You're trying to set the `return_soruce_document` parameter, but the field is actually named `return_source_documents`. Here's the corrected code:

def retrieval_qa_chain(llm, prompt, db):
qa_chain = RetrievalQA.from_chain_type(
llm=llm,
chain_type="stuff",
retriever = db.as_retriever(search_kwargs={'k':2}),
return_source_documents=True,
chain_type_kwargs={'prompt':prompt}
)
return qa_chain

Also, based on issue #16323 and issue #15700 in the LangChain repository, it seems like there might be some changes with the docarray integration. You might want to check the latest updates on these issues for more information. I hope this helps! If you have any other questions or need further clarification, feel free to ask. Sources
|
Beta Was this translation helpful? Give feedback.
🤖
Hey @siddhant250502! Great to see you diving into new challenges with LangChain. How's everything going on your end?
Based on the error message you provided, it seems like there's a typo in your code. You're trying to set the
return_soruce_document
parameter, but it should be return_source_documents
. Here's the corrected code:
Also, based on the issue #16323 and issue #15700 in the LangCha…