Update rag_llamaindex.py
rag_llamaindex.py CHANGED (+2 -1)
@@ -2,6 +2,7 @@ import os, requests
 
 from llama_hub.youtube_transcript import YoutubeTranscriptReader
 from llama_index import download_loader, PromptTemplate, ServiceContext
+from llama_index.embeddings import OpenAIEmbedding
 from llama_index.indices.vector_store.base import VectorStoreIndex
 from llama_index.llms import OpenAI
 from llama_index.storage.storage_context import StorageContext
@@ -67,7 +68,7 @@ class LlamaIndexRAG(BaseRAG):
         return ServiceContext.from_defaults(
             chunk_overlap = config["chunk_overlap"],
             chunk_size = config["chunk_size"],
-            embed_model =
+            embed_model = OpenAIEmbedding(),
             llm = self.get_llm(config)
         )
 
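Note: the removed line left embed_model = with no value, which is a syntax error, so the module could not even be imported before this fix. Below is a minimal, self-contained sketch of the call the patched code makes, assuming a config dict with the keys used in the diff (the concrete values are hypothetical) and with OpenAI(...) standing in for self.get_llm(config), which this diff does not show:

from llama_index import ServiceContext
from llama_index.embeddings import OpenAIEmbedding
from llama_index.llms import OpenAI

# Requires OPENAI_API_KEY in the environment for both the LLM and the embeddings.
# Hypothetical config; the real values come from the app's configuration.
config = {"chunk_overlap": 20, "chunk_size": 1024, "model": "gpt-3.5-turbo", "temperature": 0}

service_context = ServiceContext.from_defaults(
    chunk_overlap = config["chunk_overlap"],
    chunk_size = config["chunk_size"],
    # The fix: pass a concrete embedding model (OpenAIEmbedding defaulted to
    # text-embedding-ada-002 in this generation of llama_index) instead of a
    # dangling assignment.
    embed_model = OpenAIEmbedding(),
    # Stand-in for self.get_llm(config) from the surrounding class.
    llm = OpenAI(model = config["model"], temperature = config["temperature"]),
)

Passing embed_model explicitly also keeps the embedding choice visible next to chunk_size and chunk_overlap; the query-time embedding model has to match whatever was used to build the vector index.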