Update app.py
app.py
CHANGED

Before (removed lines are marked with "-"; some removed lines are truncated in this capture):

@@ -1,92 +1,85 @@
  # Import the required libraries
  import gradio as gr
- import
- import
- import subprocess
  from llama_cpp import Llama
- from llama_index.core import
- from llama_index.core.
- from llama_index.llms.llama_cpp import LlamaCPP
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
  from huggingface_hub import hf_hub_download
- from llama_index.core.

- #
  def initialize_llama_model():
-     # Download the model if it is not already in the working directory
      model_path = hf_hub_download(
-         repo_id="TheBLoke/zephyr-7b-beta-GGUF", #
-         filename="zephyr-7b-beta.Q4_K_M.gguf", # Model file name
-         cache_dir="./models"
      )
      return model_path

- #
-
      Settings.llm = LlamaCPP(
          model_path=model_path,
          temperature=0.7,
      )

- #
  def initialize_index():
-     #
-
-
-
-
-         "bahandokumen/disiplinkerja.txt",
-         "bahandokumen/fasilitas&bantuan.txt",
-         "bahandokumen/fasilitaskerja.txt",
-         "bahandokumen/hak.txt",
-         "bahandokumen/hubunganpengusaha&serikat.txt",
-         "bahandokumen/istilah.txt",
-         "bahandokumen/jaminanserikat.txt",
-         "bahandokumen/jamkes.txt",
-         "bahandokumen/jamsos.txt",
-         "bahandokumen/keluhkesah.txt",
-         "bahandokumen/kenaikanupah.txt",
-         "bahandokumen/kewajiban.txt",
-         "bahandokumen/kompensasi.txt",
-         "bahandokumen/larangan.txt",
-         "bahandokumen/lembur.txt",
-         "bahandokumen/luaskesepakatan.txt",
-         "bahandokumen/mogok.txt",
-         "bahandokumen/pelanggaran&sanksi.txt",
-         "bahandokumen/pendidikan.txt",
-         "bahandokumen/pengangkatan.txt",
-         "bahandokumen/penilaian&promosi.txt",
-         "bahandokumen/pensiun.txt",
-         "bahandokumen/perjadin.txt",
-         "bahandokumen/pesangon.txt",
-         "bahandokumen/phk.txt",
-         "bahandokumen/pihak.txt",
-         "bahandokumen/pkb.txt",
-         "bahandokumen/resign.txt",
-         "bahandokumen/sanksi.txt",
-         "bahandokumen/shift.txt",
-         "bahandokumen/syaratkerja.txt",
-         "bahandokumen/sisacuti.txt",
-         "bahandokumen/target.txt",
-         "bahandokumen/tatacara.txt",
-         "bahandokumen/tka.txt",
-         "bahandokumen/tunjangan.txt",
-         "bahandokumen/uangpisah.txt",
-         "bahandokumen/upah.txt",
-         "bahandokumen/upahlembur.txt",
-         "bahandokumen/waktukerja.txt"]).load_data()

      parser = SentenceSplitter(chunk_size=150, chunk_overlap=10)
      nodes = parser.get_nodes_from_documents(documents)
      embedding = HuggingFaceEmbedding("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
      Settings.embed_model = embedding
      index = VectorStoreIndex(nodes)
      return index

- #
  def initialize_chat_engine(index):
-     from llama_index.core.prompts import PromptTemplate
-     from llama_index.core.chat_engine.condense_plus_context import CondensePlusContextChatEngine
      retriever = index.as_retriever(similarity_top_k=3)
      chat_engine = CondensePlusContextChatEngine.from_defaults(
          retriever=retriever,
@@ -94,45 +87,43 @@ def initialize_chat_engine(index):
      )
      return chat_engine

- #
  def generate_response(message, history, chat_engine):
-     if history is None:
          history = []

      chat_messages = [
          ChatMessage(
              role="system",
-             content="Anda adalah chatbot yang menjawab dalam bahasa Indonesia
-                      "Pengguna: Apa itu lembur?\n"
-                      "Chatbot: Lembur adalah pekerjaan yang dilakukan di luar jam kerja yang telah ditetapkan.\n"
-                      "Pengguna: Apa syarat kerja di perusahaan?\n"
-                      "Chatbot: Syarat kerja di perusahaan meliputi waktu kerja, gaji, dan tunjangan sesuai dengan peraturan perusahaan."
          ),
      ]
      response = chat_engine.stream_chat(message)
-     text = "".join(response.response_gen)  # Join all streamed tokens into one string

-     history.append((message, text))
      return history

-
-
-
- # Main function to run the application
  def main():
-     # Download the model and initialize settings
      model_path = initialize_llama_model()
      initialize_settings(model_path)

-     # Initialize the index and chat engine
      index = initialize_index()
      chat_engine = initialize_chat_engine(index)

-     # Chat handler
      def chatbot_response(message, history):
          return generate_response(message, history, chat_engine)

-     # Launch the Gradio UI
      gr.Interface(
          fn=chatbot_response,
          inputs=["text"],

After (added lines are marked with "+"):

  # Import the required libraries
  import gradio as gr
+ import gspread
+ from oauth2client.service_account import ServiceAccountCredentials
  from llama_cpp import Llama
+ from llama_index.core import VectorStoreIndex, Settings, Document
+ from llama_index.core.node_parser import SentenceSplitter
  from llama_index.embeddings.huggingface import HuggingFaceEmbedding
+ from llama_index.llms.llama_cpp import LlamaCPP
  from huggingface_hub import hf_hub_download
+ from llama_index.core.llms import ChatMessage
+ from llama_index.core.chat_engine.condense_plus_context import CondensePlusContextChatEngine
+
+ # ===================================
+ # 1️⃣ Function to read the Google Spreadsheet
+ # ===================================
+ def read_google_sheet():
+     # Define the access scopes for Google Sheets & Drive
+     scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
+
+     # Load credentials from the credentials.json file
+     creds = ServiceAccountCredentials.from_json_keyfile_name("credentials.json", scope)
+     client = gspread.authorize(creds)
+
+     # 📌 CHANGE THIS PART TO MATCH YOUR SPREADSHEET
+     spreadsheet = client.open("datatarget")  # 🔹 Replace with the name of your spreadsheet
+     sheet = spreadsheet.sheet1  # 🔹 For a different sheet, use spreadsheet.worksheet("SheetName")

+     # Fetch all the data as a list of rows and columns
+     data = sheet.get_all_values()
+
+     # Reformat the data into one long text block (adjust as needed)
+     formatted_text = "\n".join([" | ".join(row) for row in data])
+
+     return formatted_text
+
+ # ===================================
+ # 2️⃣ Function to download the Llama model
+ # ===================================
  def initialize_llama_model():
      model_path = hf_hub_download(
+         repo_id="TheBLoke/zephyr-7b-beta-GGUF",  # 📌 HuggingFace model repo
+         filename="zephyr-7b-beta.Q4_K_M.gguf",  # 📌 Model file name
+         cache_dir="./models"
      )
      return model_path

+ # ===================================
+ # 3️⃣ Initialize the model and settings
+ # ===================================
+ def initialize_settings(model_path):
      Settings.llm = LlamaCPP(
          model_path=model_path,
          temperature=0.7,
      )

+ # ===================================
+ # 4️⃣ Initialize the index from the spreadsheet data
+ # ===================================
  def initialize_index():
+     # 🔹 Fetch the text from the Google Spreadsheet
+     text_data = read_google_sheet()
+
+     # 🔹 Convert the text into a Document object
+     documents = [Document(text=text_data)]

+     # 🔹 Split the data into nodes for vector embedding
      parser = SentenceSplitter(chunk_size=150, chunk_overlap=10)
      nodes = parser.get_nodes_from_documents(documents)
+
+     # 🔹 Use the embedding model
      embedding = HuggingFaceEmbedding("sentence-transformers/paraphrase-multilingual-MiniLM-L12-v2")
      Settings.embed_model = embedding
+
+     # 🔹 Build the vector index
      index = VectorStoreIndex(nodes)
      return index

+ # ===================================
+ # 5️⃣ Initialize the chat engine
+ # ===================================
  def initialize_chat_engine(index):
      retriever = index.as_retriever(similarity_top_k=3)
      chat_engine = CondensePlusContextChatEngine.from_defaults(
          retriever=retriever,
@@ -94,45 +87,43 @@ def initialize_chat_engine(index):
      )
      return chat_engine

+ # ===================================
+ # 6️⃣ Function to generate the chatbot response
+ # ===================================
  def generate_response(message, history, chat_engine):
+     if history is None:
          history = []

      chat_messages = [
          ChatMessage(
              role="system",
+             content="Anda adalah chatbot yang menjawab dalam bahasa Indonesia berdasarkan dokumen di Google Spreadsheet."
          ),
      ]
+
      response = chat_engine.stream_chat(message)
+     text = "".join(response.response_gen)  # 🔹 Join all streamed tokens into one string

+     history.append((message, text))
      return history

+ # ===================================
+ # 7️⃣ Main function to run the application
+ # ===================================
  def main():
+     # 🔹 Download the model and initialize settings
      model_path = initialize_llama_model()
      initialize_settings(model_path)

+     # 🔹 Initialize the index and chat engine
      index = initialize_index()
      chat_engine = initialize_chat_engine(index)

+     # 🔹 Chat handler
      def chatbot_response(message, history):
          return generate_response(message, history, chat_engine)

+     # 🔹 Launch the Gradio UI
      gr.Interface(
          fn=chatbot_response,
          inputs=["text"],
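
Since the commit swaps the local bahandokumen/*.txt corpus for a Google Sheet, the new ingestion path is worth exercising on its own before launching the Gradio app. The sketch below is not part of the commit: it assumes the same credentials.json service-account key in the working directory, a spreadsheet named datatarget that has been shared with the service-account email, and that the data sits in the first worksheet. It pulls the rows the same way read_google_sheet() does, then checks that the joined text can be wrapped in a llama_index Document and chunked into nodes.

# Standalone check of the spreadsheet ingestion path (assumptions noted above).
import gspread
from oauth2client.service_account import ServiceAccountCredentials
from llama_index.core import Document
from llama_index.core.node_parser import SentenceSplitter

scope = ["https://spreadsheets.google.com/feeds", "https://www.googleapis.com/auth/drive"]
creds = ServiceAccountCredentials.from_json_keyfile_name("credentials.json", scope)
client = gspread.authorize(creds)

# Pull every row and join the columns with " | ", as read_google_sheet() does.
rows = client.open("datatarget").sheet1.get_all_values()
text = "\n".join(" | ".join(row) for row in rows)
print(text[:500])  # eyeball the first few rows

# Wrap the text in a Document and chunk it, mirroring initialize_index().
nodes = SentenceSplitter(chunk_size=150, chunk_overlap=10).get_nodes_from_documents(
    [Document(text=text)]
)
print(f"{len(nodes)} nodes ready for embedding")

If the data lives on another tab, client.open("datatarget").worksheet("SheetName") selects it by name, matching the hint left in read_google_sheet().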