Commit f459b26 (verified), committed by tmdeptrai3012
Parent: f869ca5

deploy 2025-08-09 16:48:55

.dockerignore CHANGED
@@ -1 +1,5 @@
 node_modules
+.git
+__pycache__
+*.pyc
+.env
.gitattributes CHANGED
@@ -40,3 +40,6 @@ figures/RAG_pipeline.png filter=lfs diff=lfs merge=lfs -text
 figures/demo.gif filter=lfs diff=lfs merge=lfs -text
 figures/demo.mp4 filter=lfs diff=lfs merge=lfs -text
 figures/grafana.png filter=lfs diff=lfs merge=lfs -text
+downloaded_pdfs/AmazingFacts.pdf filter=lfs diff=lfs merge=lfs -text
+downloaded_pdfs/contrat_2c969eb69791039801980498c3333638_Liasse_DocumentASigner_RESIDENCE[[:space:]]COUREILLES_T1-20250713-182255.929_11.pdf.pdf filter=lfs diff=lfs merge=lfs -text
+downloaded_pdfs/hop-dong-thue-nha-tro_2810165040_2011153218_0804150519.pdf filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1,4 +1,4 @@
-FROM python:3.10-slim
+FROM python:3.10-slim AS base
 
 ENV PYTHONDONTWRITEBYTECODE=1 \
     PYTHONUNBUFFERED=1 \
@@ -7,14 +7,21 @@ ENV PYTHONDONTWRITEBYTECODE=1 \
 
 WORKDIR /app
 
-# Rest of the code
-COPY . .
-
-# Install backend dependencies
-RUN pip install --no-cache-dir -r requirements.txt
+# Install build dependencies only if needed (optional)
+# RUN apt-get update && apt-get install -y --no-install-recommends build-essential && rm -rf /var/lib/apt/lists/*
+
+# Copy only requirements first (cacheable layer)
+COPY requirements.txt .
+
+# Install backend dependencies (cached if requirements.txt hasn't changed)
+RUN --mount=type=cache,target=/root/.cache \
+    pip install -r requirements.txt
+
+# Now copy the rest of the application
+COPY . .
 
 # Expose backend and frontend ports
 EXPOSE 3012 8080
 
-# Start both backend (FastAPI) and serve frontend (using Python's http.server)
+# Start both backend and frontend
 CMD ["sh", "-c", "uvicorn backend.main:app --host 0.0.0.0 --port 3012 & python3 -m http.server 8080 --directory frontend"]
README.md CHANGED
@@ -1,12 +1,3 @@
----
-title: Basic Docker SDK Space
-emoji: 🐳
-colorFrom: purple
-colorTo: gray
-sdk: docker
-app_port: 8000
----
-
 # Legal Contract Analyzer
 An MLOps project: an AI-powered RAG chatbot for understanding and querying legal documents, with CI/CD, monitoring and visualization.
 
@@ -35,6 +26,8 @@ With the power of RAG, the answers are now more precise, the LLM experiences les
 
 [Watch the full demo video](https://youtu.be/kvJwAMWmvj0)
 
+Demo on Render: https://legalcontractanalyzer.onrender.com/
+
 ### Monitoring and Visualization with Prometheus + Grafana:
 
 ![Prometheus](./figures/prometheus.png)
@@ -124,19 +117,20 @@ docker compose build .
 docker compose up -d
 ```
 
-Access the web app frontend at: http://localhost:8080
+3. Access the web app frontend at: http://localhost:8080
 
-Acces the monitoring Grafana at: http://localhost:3000
+Start by uploading a file, then ask a question like: "Thời hạn hợp đồng kéo dài trong bao lâu?" ("How long does the contract term last?")
+
+4. Access the monitoring Grafana at: http://localhost:3000
 
 *Note*: The Grafana username / password should be admin / admin; go to Dashboards and select a panel to view metrics
 
-Other ports if you're interested:
+5. Other ports, if you're interested:
 
 Backend: http://localhost:3012
 
 Prometheus: http://localhost:9090
 
-
 ## API Endpoints
 
 Available endpoints for backend:
@@ -148,7 +142,7 @@ Available endpoints for backend:
 |POST /ingest | Embeds and stores chunks
 |POST /query | Retrieves top-K chunks for a query
 |POST /rag_chat | Full chat with RAG streaming
-GET /api_key | Exposes env vars (for dev)
+|GET /api_key | Exposes env vars (for dev)
 
 ## Monitoring
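For the endpoints in the table above, here is a minimal Python sketch of calling POST /query from outside the frontend. The JSON field names (`query`, `k`) are assumptions for illustration only; the actual request model lives in the backend code:

```python
# Hypothetical client for POST /query; the "query" and "k" field names are
# illustrative guesses, so adjust them to the backend's real request schema.
import requests

resp = requests.post(
    "http://localhost:3012/query",
    json={"query": "Thời hạn hợp đồng kéo dài trong bao lâu?", "k": 5},
)
resp.raise_for_status()
print(resp.json())  # expected: the top-k retrieved chunks
```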
 
backend/chroma_vector_db/chroma.sqlite3 CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7503eebf53b9f9e4e18950d5a04cedfe613e13bcd2215857e79a643241795d31
-size 540672
+oid sha256:bedaaed932573c2da9f3c8ad609773f192cb3e027c9aabc8927f9567f35b288b
+size 3514368
backend/query.py CHANGED
@@ -1,43 +1,67 @@
-import os
+# backend/query.py
+import numpy as np
 import chromadb
 from openai import OpenAI
 from dotenv import load_dotenv
 from backend.config import CHROMA_DB_PATH
-# ─── ENVIRONMENT ──────────────────────────────────────────────────────────────
+import os
+
 load_dotenv()
-API_KEY = os.getenv("OPENAI_API_KEY", "TRANMINHDUONGDEPTRAI")
-BASE_URL = "https://glowing-workable-arachnid.ngrok-free.app/v1"  # or ngrok URL
+API_KEY = os.getenv("OPENAI_API_KEY")
+BASE_URL = os.getenv("FASTCHAT_URL", "https://glowing-workable-arachnid.ngrok-free.app/v1")
 openai_client = OpenAI(api_key=API_KEY, base_url=BASE_URL)
 
-# ─── CHROMA SETUP ─────────────────────────────────────────────────────────────
 chroma_client = chromadb.PersistentClient(path=CHROMA_DB_PATH)
 collection = chroma_client.get_or_create_collection("legal_docs")
 
-# ─── EMBEDDING FUNCTION ───────────────────────────────────────────────────────
-def embed_query(query_text):
+def embed_texts(texts):
     resp = openai_client.embeddings.create(
         model="Qwen3-0.6B",
-        input=[query_text]
+        input=texts
     )
-    return resp.data[0].embedding
-
-# ─── TOP-K RETRIEVAL ──────────────────────────────────────────────────────────
-def query_top_k(query_text, k=5):
-    query_emb = embed_query(query_text)
-    results = collection.query(
-        query_embeddings=[query_emb],
-        n_results=k
-    )
-    # results['documents'] is a list of lists (one per query)
-    # results['distances'] is a list of lists (one per query)
-    # We'll return a list of (chunk, distance) tuples
-    docs = results['documents'][0] if results['documents'] else []
-    dists = results['distances'][0] if results['distances'] else []
-    return list(zip(docs, dists))
+    # ensure order
+    return [item.embedding for item in sorted(resp.data, key=lambda d: d.index)]
+
+def normalize(vec):
+    arr = np.array(vec, dtype=np.float32)
+    return arr / (np.linalg.norm(arr) + 1e-10)
+
+def query_top_k(query_text, k=10, rerank_top_n=5):
+    # 1) embed
+    q_emb = embed_texts([query_text])[0]
+    q_norm = normalize(q_emb)
+
+    # 2) dense retrieval (get more candidates)
+    results = collection.query(query_embeddings=[q_emb], n_results=k)
+    docs = results.get('documents', [[]])[0]
+    dists = results.get('distances', [[]])[0]
+
+    # Note: Chroma distances are lower = better. We compute cosine from stored embeddings if available.
+    # If embeddings were stored in the collection, pull them (some Chroma versions allow include=['embeddings']).
+    # Otherwise fall back to converting distance -> similarity (exact only if the metric is cosine).
+    sims = []
+    for idx, doc in enumerate(docs):
+        # try to get the stored embedding if available:
+        try:
+            emb = results['embeddings'][0][idx]
+            sim = float(np.dot(q_norm, normalize(emb)))
+        except Exception:
+            # fallback: invert distance (only approximate)
+            dist = dists[idx] if idx < len(dists) else 1.0
+            sim = 1.0 - float(dist)
+        sims.append((doc, sim))
+
+    # sort by similarity desc
+    sims.sort(key=lambda x: x[1], reverse=True)
+
+    # optional: rerank top candidates with a cross-encoder here
+
+    return sims[:rerank_top_n]  # return the top rerank_top_n (chunk, similarity) pairs
+
 
 # Example usage:
 if __name__ == "__main__":
-    q = "Sẽ ra sao nếu một trong hai bên muốn chấm dứt hợp đồng trước thời hạn?"
+    q = "An interesting fact about the humming bird"
     top_chunks = query_top_k(q, k=3)
     for chunk, dist in top_chunks:
         print(f"Score: {dist:.4f}\n{chunk}\n{'-'*40}")
downloaded_pdfs/AmazingFacts.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:97d6ce2512ad2c8df2ef58709e2ad00186acc7eb3dad9da3554eb92f13eac531
+size 127901
downloaded_pdfs/contrat_2c969eb69791039801980498c3333638_Liasse_DocumentASigner_RESIDENCE COUREILLES_T1-20250713-182255.929_11.pdf.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c75cb43e380be568c00346f4b56dfab7447de39a35c77bae63c25a881ba7b7aa
+size 993576
downloaded_pdfs/hop-dong-thue-nha-tro_2810165040_2011153218_0804150519.pdf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:797d41945ea3b81f01163d86215cb7f7a0277c71f085429bc6820aa18e2f4a50
+size 490064
frontend/script.js CHANGED
@@ -254,14 +254,30 @@ document.addEventListener("DOMContentLoaded", function () {
       embeddedFiles[file.name] = true;
       uploadedCount++;
       renderFileList();
-      updateStatus(`Uploaded ${file.name} to Supabase!`, "success");
+      updateStatus("Uploaded " + file.name + " to Supabase!", "success");
     } catch (err) {
-      updateStatus(`Error uploading ${file.name}: ${err.message || err}`, "error");
+      updateStatus("Error uploading " + file.name + ": " + (err.message || err), "error");
     }
   }
+  // After uploading, call backend to retrieve and ingest documents
   if (uploadedCount > 0) {
-    updateStatus(`Uploaded ${uploadedCount} file(s) to Supabase!`, "success");
-    embeddingStatus.textContent = "Uploaded";
+    disableChat()
+    updateStatus("Calling backend to retrieve documents...", "processing");
+    try {
+      const retrieveRes = await fetch("http://localhost:3012/retrieve_documents", { method: "POST" });
+      if (!retrieveRes.ok) throw new Error("Failed to retrieve documents");
+      updateStatus("Retrieving documents complete. Now ingesting...", "processing");
+      console.log("Retrieving documents complete. Now ingesting...")
+      const ingestRes = await fetch("http://localhost:3012/ingest", { method: "POST" });
+      if (!ingestRes.ok) throw new Error("Failed to ingest documents");
+      updateStatus("Uploaded and embedded " + uploadedCount + " file(s) successfully!", "success");
+      console.log("Successfully embedded the documents!")
+      embeddingStatus.textContent = "Uploaded";
+    } catch (err) {
+      updateStatus("Error embedding documents: " + (err.message || err), "error");
+      embeddingStatus.textContent = "Error";
+    }
+    enableChat()
   } else {
     updateStatus("No new files uploaded.", "warning");
     embeddingStatus.textContent = "Ready";
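The new upload flow is: push files to Supabase, then POST /retrieve_documents, then POST /ingest. A short Python sketch for exercising the backend half of that pipeline without the UI, assuming a local backend on port 3012 and empty POST bodies (which is what the `fetch()` calls above send):

```python
# Smoke-test the retrieve -> ingest pipeline directly, no frontend needed.
# Assumes the backend is running locally on port 3012 and that both
# endpoints accept body-less POSTs, mirroring the fetch() calls above.
import requests

BASE = "http://localhost:3012"

retrieve = requests.post(f"{BASE}/retrieve_documents")
retrieve.raise_for_status()  # equivalent of `if (!retrieveRes.ok) throw ...`

ingest = requests.post(f"{BASE}/ingest")
ingest.raise_for_status()

print("Retrieved and ingested documents successfully.")
```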