aquibmoin committed on
Commit c9a2e13 · verified · 1 Parent(s): 984b8ae

Update app.py

Files changed (1): app.py (+67 -111)
app.py CHANGED
@@ -1,124 +1,80 @@
-# Re-build-1
-
 import gradio as gr
-import faiss
-import numpy as np
-import os
-from datasets import load_dataset, Dataset
-from huggingface_hub import HfApi, hf_hub_download
-from PyPDF2 import PdfReader
 from transformers import AutoTokenizer, AutoModel
 import torch

-# Set HF Dataset Name & Index File
-HF_DATASET_NAME = "aquibmoin/SCDD-Embeddings"
-INDEX_FILE = "faiss_index.faiss"
-
-# Load NASA Bi-Encoder
-bi_encoder_model_name = "nasa-impact/nasa-smd-ibm-st-v2"
-bi_tokenizer = AutoTokenizer.from_pretrained(bi_encoder_model_name)
-bi_model = AutoModel.from_pretrained(bi_encoder_model_name)
-
-# Initialize HF API
-hf_api = HfApi()
-
-# Function to extract text from a PDF
-def extract_text_from_pdf(pdf_file):
-    text = ""
-    with pdf_file as f:
-        reader = PdfReader(f)
-        for page in reader.pages:
-            text += page.extract_text() + "\n"
-    return text
-
-# Function to split text into chunks
-def get_chunks(text, chunk_size=500):
-    return [text[i:i+chunk_size] for i in range(0, len(text), chunk_size)]
-
-# Function to generate embeddings
-def generate_embedding(text):
-    inputs = bi_tokenizer(text, return_tensors="pt", padding=True, truncation=True, max_length=512)
-    with torch.no_grad():
-        outputs = bi_model(**inputs)
-    embedding = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()
-    return embedding / np.linalg.norm(embedding)  # Normalize for FAISS
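
Why that normalization matters: the FAISS index created below is faiss.IndexFlatIP, which ranks by raw inner product, and inner product equals cosine similarity only for unit-length vectors. A quick self-contained check (a sketch, not part of the commit):

import numpy as np
rng = np.random.default_rng(0)
a, b = rng.normal(size=768), rng.normal(size=768)
cosine = a @ b / (np.linalg.norm(a) * np.linalg.norm(b))
a_unit, b_unit = a / np.linalg.norm(a), b / np.linalg.norm(b)
assert abs(cosine - a_unit @ b_unit) < 1e-9  # inner product of unit vectors == cosine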
-
-# Function to load existing FAISS index from HF
-def load_existing_faiss_index():
-    try:
-        index_path = hf_hub_download(repo_id=HF_DATASET_NAME, filename=INDEX_FILE, repo_type="dataset")
-        index = faiss.read_index(index_path)
-        print("✅ Loaded existing FAISS index.")
-        return index
-    except:
-        print("⚠️ No existing FAISS index found. Creating a new one.")
-        return faiss.IndexFlatIP(768)
-
-# Main function to process PDFs & update HF Dataset
-def process_pdfs_and_store(pdf_files):
-    index = load_existing_faiss_index()
-
-    try:
-        dataset = load_dataset(HF_DATASET_NAME, split="train")
-        existing_chunks = dataset["chunk_text"]
-        existing_embeddings = [np.array(emb) for emb in dataset["embedding"]]
-        existing_files = dataset["source_file"]
-    except:
-        existing_chunks, existing_embeddings, existing_files = [], [], []
-
-    all_chunks, all_embeddings = [], []

     for pdf_file in pdf_files:
-        text = extract_text_from_pdf(pdf_file)
-        chunks = get_chunks(text)
-        embeddings = [generate_embedding(chunk) for chunk in chunks]
-
-        all_chunks.extend(chunks)
-        all_embeddings.extend(embeddings)
-
-    all_embeddings_np = np.array(all_embeddings)

-    # Append new embeddings & chunks to the existing ones
-    combined_chunks = existing_chunks + all_chunks
-    combined_embeddings = existing_embeddings + list(all_embeddings_np)
-    combined_files = existing_files + [pdf_file.name for pdf_file in pdf_files for _ in range(len(all_chunks))]

-    combined_embeddings_np = np.array(combined_embeddings)

-    # Update FAISS Index
-    index.add(all_embeddings_np)

-    # Save & Upload Updated FAISS Index
-    faiss.write_index(index, INDEX_FILE)
-    hf_api.upload_file(path_or_fileobj=INDEX_FILE, path_in_repo=INDEX_FILE, repo_id=HF_DATASET_NAME, repo_type="dataset")

-    # Update & Push Dataset
-    dataset_dict = {
-        "chunk_text": combined_chunks,
-        "embedding": [emb.tolist() for emb in combined_embeddings_np],
-        "source_file": combined_files
-    }

-    dataset = Dataset.from_dict(dataset_dict)
-    dataset.push_to_hub(HF_DATASET_NAME, split="train")
-
-    return f"✅ Successfully updated FAISS index & embeddings in {HF_DATASET_NAME}. Total Chunks: {len(combined_chunks)}."
-
-# Gradio UI
-with gr.Blocks() as demo:
-    gr.Markdown("# 🚀 SCDD Embeddings Generator - Hugging Face Spaces")
-    gr.Markdown("Upload PDFs to generate and store embeddings in a FAISS vector store on Hugging Face.")
-
-    pdf_input = gr.Files(file_types=[".pdf"], label="Upload Reference PDFs (Up to 3)", interactive=True)
-    submit_button = gr.Button("Generate & Store Embeddings")
-
-    output_text = gr.Textbox(label="Status")
-
-    submit_button.click(
-        fn=process_pdfs_and_store,
-        inputs=[pdf_input],
-        outputs=[output_text]
-    )

-# Launch Gradio App
-demo.launch(share=True)
 import gradio as gr
 from transformers import AutoTokenizer, AutoModel
 import torch
+import numpy as np
+from PyPDF2 import PdfReader
+from pinecone import Pinecone, ServerlessSpec, CloudProvider, AwsRegion, VectorType
+import os

+# Load NASA-specific bi-encoder model
+tokenizer = AutoTokenizer.from_pretrained("nasa-impact/nasa-smd-ibm-st-v2")
+model = AutoModel.from_pretrained("nasa-impact/nasa-smd-ibm-st-v2")
+
+# Initialize Pinecone client
+pinecone_api_key = os.getenv('PINECONE_API_KEY')
+pc = Pinecone(api_key=pinecone_api_key)
+
+# Create Pinecone index if it doesn't exist
+index_name = "scdd-index"
+if index_name not in pc.list_indexes().names():
+    pc.create_index(
+        name=index_name,
+        dimension=768,
+        spec=ServerlessSpec(
+            cloud=CloudProvider.AWS,
+            region=AwsRegion.US_EAST_1
+        ),
+        vector_type=VectorType.DENSE,
+        metric="cosine"
+    )
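
The hardcoded dimension=768 must match the width of the bi-encoder's mean-pooled output. A one-line sketch (not part of the commit) that derives it from the loaded model instead of hardcoding it:

embedding_dim = model.config.hidden_size  # 768 for nasa-impact/nasa-smd-ibm-st-v2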
+
+# Connect to the Pinecone index
+index = pc.Index(index_name)
+
+# Function to encode text using bi-encoder in batches
+def encode_chunks_batch(chunks, batch_size=8):
+    embeddings = []
+    for i in range(0, len(chunks), batch_size):
+        batch_chunks = chunks[i:i+batch_size]
+        inputs = tokenizer(batch_chunks, return_tensors='pt', padding=True, truncation=True, max_length=128)
+        with torch.no_grad():
+            output = model(**inputs)
+        batch_embeddings = output.last_hidden_state.mean(dim=1)
+        batch_embeddings = batch_embeddings / batch_embeddings.norm(dim=1, keepdim=True)
+        embeddings.extend(batch_embeddings.cpu().numpy())
+    return embeddings
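
As a usage sketch (not in the commit): the function returns one unit-norm 768-dimensional vector per chunk, so with the index's cosine metric the similarity of two chunks reduces to a dot product:

vecs = encode_chunks_batch(["solar wind", "coronal mass ejection"])
print(len(vecs), vecs[0].shape)   # 2 (768,)
print(float(vecs[0] @ vecs[1]))   # cosine similarity, since both vectors are unit-norm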
+
+# Function to process PDFs and upsert embeddings to Pinecone
+def process_pdfs(pdf_files):
     for pdf_file in pdf_files:
+        reader = PdfReader(pdf_file.name)
+        pdf_text = "".join(page.extract_text() for page in reader.pages if page.extract_text())
+
+        # Split text into smaller chunks
+        chunks = [pdf_text[i:i+500] for i in range(0, len(pdf_text), 500)]
+
+        # Generate embeddings in batches
+        embeddings = encode_chunks_batch(chunks, batch_size=8)
+
+        # Prepare data for Pinecone
+        vectors = [
+            (f"{os.path.basename(pdf_file.name)}-chunk-{idx}", embedding.tolist(), {"text": chunk})
+            for idx, (embedding, chunk) in enumerate(zip(embeddings, chunks))
+        ]
+
+        # Upsert embeddings into Pinecone
+        index.upsert(vectors)
+
+    return f"Processed {len(pdf_files)} PDF(s) successfully and embeddings stored in Pinecone."
+
+# Gradio Interface
+demo = gr.Interface(
+    fn=process_pdfs,
+    inputs=gr.Files(label="Upload PDFs", file_types=[".pdf"]),
+    outputs="text",
+    title="NASA Bi-encoder PDF Embedding & Pinecone Storage",
+    description="Upload PDF files to generate embeddings with NASA Bi-encoder and store in Pinecone."
+)
+
+demo.launch()
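
A minimal retrieval sketch against the same index (not part of this commit; the query text and top_k are arbitrary): encode the query with the same bi-encoder, then ask Pinecone for the nearest chunks:

query_vec = encode_chunks_batch(["ion and electron spectrometer requirements"])[0]
results = index.query(vector=query_vec.tolist(), top_k=3, include_metadata=True)
for match in results.matches:
    print(match.id, round(match.score, 3), match.metadata["text"][:80])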