SansarK committed
Commit 908c319 · verified · 1 Parent(s): 035a5bb

Upload rag_with_mircosoftphi2_and_hf_embeddings.py

rag_with_mircosoftphi2_and_hf_embeddings.py ADDED
@@ -0,0 +1,83 @@
# -*- coding: utf-8 -*-
"""RAG_with_MircosoftPhi2_and_HF_Embeddings.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/github/sumant1122/RAG-Phi2-LlamaIndex/blob/main/RAG_with_MircosoftPhi2_and_HF_Embeddings.ipynb
"""

!pip install -q pypdf
!pip install -q python-dotenv
!pip install -q llama-index
!pip install -q llama-index-llms-huggingface
!pip install -q llama-index-embeddings-huggingface
!pip install -q gradio
!pip install einops
!pip install accelerate
!pip install -q llama-cpp-python

!pip install llama-index-llms-llama-cpp llama-index-embeddings-huggingface

from llama_index.core import VectorStoreIndex, SimpleDirectoryReader, ServiceContext
import torch

documents = SimpleDirectoryReader("/content/rag").load_data()
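
SimpleDirectoryReader assumes the /content/rag folder already exists and holds the source documents; nothing in the script creates it. A minimal Colab setup sketch, assuming you upload your own PDFs into that folder, could look like:

import os

# create the folder the reader points at, then drop PDFs into it
# (e.g. via the Colab file browser) before calling load_data()
os.makedirs("/content/rag", exist_ok=True)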

"""New section"""

from llama_index.core.prompts.prompts import SimpleInputPrompt
from llama_index.llms.llama_cpp import LlamaCPP

system_prompt = "You are a Q&A assistant. Your goal is to answer questions as accurately as possible based on the instructions and context provided."

# This will wrap the default prompts that are internal to llama-index
query_wrapper_prompt = SimpleInputPrompt("<|USER|>{query_str}<|ASSISTANT|>")

# model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGML/resolve/main/llama-2-13b-chat.ggmlv3.q4_0.bin"
model_url = "https://huggingface.co/TheBloke/Llama-2-13B-chat-GGUF/resolve/main/llama-2-13b-chat.Q4_0.gguf"

llm = LlamaCPP(
    # you can pass in the URL to a GGUF model to download it automatically
    model_url=model_url,
    # optionally, you can set the path to a pre-downloaded model instead of model_url
    model_path=None,
    temperature=0.1,
    max_new_tokens=256,
    # llama2 has a context window of 4096 tokens, but we set it lower to allow for some wiggle room
    context_window=3900,
    # kwargs to pass to __call__()
    generate_kwargs={},
    # kwargs to pass to __init__()
    # set to at least 1 to use the GPU
    model_kwargs={"n_gpu_layers": 1},
    verbose=True,
)
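
Note that system_prompt and query_wrapper_prompt defined above are never passed to LlamaCPP, so as written they have no effect on generation. One hedged alternative, assuming the llama_utils helpers that ship with llama-index-llms-llama-cpp, is to apply the Llama-2 chat template directly:

from llama_index.llms.llama_cpp.llama_utils import (
    messages_to_prompt,
    completion_to_prompt,
)

# same model and sampling settings as above, but with explicit Llama-2 chat formatting
llm = LlamaCPP(
    model_url=model_url,
    temperature=0.1,
    max_new_tokens=256,
    context_window=3900,
    model_kwargs={"n_gpu_layers": 1},
    messages_to_prompt=messages_to_prompt,
    completion_to_prompt=completion_to_prompt,
    verbose=True,
)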

"""HuggingFace Embeddings"""

from llama_index.embeddings.huggingface import HuggingFaceEmbedding

# loads BAAI/bge-small-en-v1.5
embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

service_context = ServiceContext.from_defaults(
    chunk_size=256,
    llm=llm,
    embed_model=embed_model,
)
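
In recent llama-index releases ServiceContext is deprecated (and eventually removed) in favour of the global Settings object. If the block above fails on a newer install, a roughly equivalent sketch, assuming llama-index >= 0.10, would be:

from llama_index.core import Settings

Settings.llm = llm
Settings.embed_model = embed_model
Settings.chunk_size = 256

# with Settings populated, the index can be built without passing a service_context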

"""predict"""

index = VectorStoreIndex.from_documents(documents, service_context=service_context)

query_engine = index.as_query_engine()

def predict(input, history):
    # gr.ChatInterface calls this with (message, history); the history is unused here
    response = query_engine.query(input)
    return str(response)
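
For checking retrieval outside the chat UI, the query engine can also be called directly; the question string below is only a placeholder:

# run a one-off query and inspect which chunks were retrieved to ground the answer
response = query_engine.query("What is this document about?")
print(response)

for node_with_score in response.source_nodes:
    print(node_with_score.score, node_with_score.node.metadata.get("file_name"))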

"""Gradio"""

import gradio as gr

# share=True exposes the chat UI on a temporary public Gradio link
gr.ChatInterface(predict).launch(share=True)