Knowledge-base query application (查询知识库应用)

66 lines
2.4 KiB
  1. # from chromadb.config import Settings
  2. from sentence_transformers import SentenceTransformer
  3. import chromadb
  4. from tqdm import tqdm
  5. from langchain.text_splitter import RecursiveCharacterTextSplitter
  6. import uuid
  7. class LangChainChroma:
  8. def __init__(self,collection_name):
  9. dirPath="../chromaDB/allField/"
  10. self.chroma_client = chromadb.PersistentClient(path=dirPath+collection_name)
  11. # chroma_client=chromadb.Client(Settings(allow_reset=True,persist_directory="../allField/demo/"))
  12. self.collection=self.chroma_client.get_or_create_collection(name=collection_name,metadata={"hnsw:space": "cosine"})
  13. model = SentenceTransformer('text_analysis/shibing624/text2vec-base-chinese')
  14. # model = SentenceTransformer('shibing624/text2vec-base-chinese')
  15. self.bge_model = model
  16. self.text_splitter = RecursiveCharacterTextSplitter(chunk_size=500,chunk_overlap=0,separators=["\n\n", "\n", " ", ""])
  17. # chroma_client.reset()
  18. def db_close(self):
  19. self.chroma_client.clear_system_cache()
  20. def embedding_fn(self, paragraphs):
  21. '''文本向量化'''
  22. doc_vecs = [
  23. self.bge_model.encode(doc, normalize_embeddings=True).tolist()
  24. for doc in paragraphs
  25. ]
  26. return doc_vecs
  27. def add_documents(self,documents):
  28. # embeddings=get_embeddings(documents)
  29. #向collection中添加文档与向量
  30. ids = ["id-{}".format(uuid.uuid1()) for i in range(len(documents))]
  31. self.collection.add(
  32. embeddings=self.embedding_fn(documents),#每个文档的向量
  33. documents=documents,
  34. ids=ids
  35. )
  36. # logging.info('当前数据划分{}个块。数据库共有{}个块'.format(len(documents),db_count))
  37. return ids
  38. def search(self,queryQ,top_n):
  39. results=self.collection.query(
  40. # query_texts=[query],
  41. query_embeddings=self.embedding_fn([queryQ]),
  42. n_results=top_n
  43. )
  44. return results
# Example usage (ingest a file line by line, then run a query):
# vector_db = LangChainChroma("demo")
# with open("policy_test2.txt", "r", encoding="utf8") as f:
#     for line in tqdm(f):
#         docs = vector_db.text_splitter.split_text(line)
#         ids = vector_db.add_documents(docs)
#         print(ids)
# print("over")
# user_query = "鲍炳章同志?"
# results = vector_db.search(user_query, 3)
# for para in results['documents'][0]:
#     print(para + '\n')