import gradio as gr
from huggingface_hub import InferenceClient
# STEP 1 FROM SEMANTIC SEARCH
from sentence_transformers import SentenceTransformer
import torch
# STEP 2 FROM SEMANTIC SEARCH
# Open the water_cycle.txt file in read mode with UTF-8 encoding
with open("water_cycle.txt", "r", encoding="utf-8") as file:
# Read the entire contents of the file and store it in a variable
water_cycle_text = file.read()
print(water_cycle_text)
# STEP 3 FROM SEMANTIC SEARCH
def preprocess_text(text):
# Strip extra whitespace from the beginning and the end of the text
cleaned_text = text.strip()
# Split the cleaned_text by every newline character (\n)
chunks = cleaned_text.split("\n")
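    # (Splitting on single newlines assumes the source text keeps one passage
    # per line; a different corpus might call for paragraph- or sentence-level
    # chunking instead.)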
# Create an empty list to store cleaned chunks
cleaned_chunks = []
    # Clean each chunk and keep only the non-empty ones
    for chunk in chunks:
        # str.strip() returns a new string, so the result must be reassigned
        cleaned_chunk = chunk.strip()
        if cleaned_chunk != "":
            cleaned_chunks.append(cleaned_chunk)
# Print cleaned_chunks
print(cleaned_chunks)
# Print the length of cleaned_chunks
print(len(cleaned_chunks))
# Return the cleaned_chunks
return cleaned_chunks
# Call the preprocess_text function and store the result in a cleaned_chunks variable
cleaned_chunks = preprocess_text(water_cycle_text)
# STEP 4 FROM SEMANTIC SEARCH
# Load the pre-trained embedding model that converts text to vectors
model = SentenceTransformer('all-MiniLM-L6-v2')
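# Note: all-MiniLM-L6-v2 maps each chunk to a 384-dimensional dense vector,
# so the chunk embeddings below will have shape (num_chunks, 384).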
def create_embeddings(text_chunks):
# Convert each text chunk into a vector embedding and store as a tensor
    chunk_embeddings = model.encode(text_chunks, convert_to_tensor=True)
# Print the chunk embeddings
print(chunk_embeddings)
# Print the shape of chunk_embeddings
print(chunk_embeddings.shape)
# Return the chunk_embeddings
return chunk_embeddings
# Call the create_embeddings function and store the result in a new chunk_embeddings variable
chunk_embeddings = create_embeddings(cleaned_chunks)
# STEP 5 FROM SEMANTIC SEARCH
# Define a function to find the most relevant text chunks for a given query, chunk_embeddings, and text_chunks
def get_top_chunks(query, chunk_embeddings, text_chunks):
# Convert the query text into a vector embedding
    query_embedding = model.encode(query, convert_to_tensor=True)
# Normalize the query embedding to unit length for accurate similarity comparison
query_embedding_normalized = query_embedding / query_embedding.norm()
# Normalize all chunk embeddings to unit length for consistent comparison
chunk_embeddings_normalized = chunk_embeddings / chunk_embeddings.norm(dim=1, keepdim=True)
# Calculate cosine similarity between query and all chunks using matrix multiplication
    similarities = torch.matmul(chunk_embeddings_normalized, query_embedding_normalized)
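    # Because both sides are unit-normalized, this dot product is exactly the
    # cosine similarity. sentence_transformers also ships this computation as
    # a helper, so an equivalent version (no manual normalization) would be:
    #   from sentence_transformers import util
    #   similarities = util.cos_sim(query_embedding, chunk_embeddings)[0]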
# Print the similarities
print(similarities)
# Find the indices of the 3 chunks with highest similarity scores
top_indices = torch.topk(similarities, k=3).indices
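    # torch.topk returns both .values and .indices; only the indices are
    # needed here. Note that k=3 assumes the document yields at least 3
    # chunks; a larger k than the number of elements raises an error.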
# Print the top indices
print(top_indices)
# Create an empty list to store the most relevant chunks
top_chunks = []
# Loop through the top indices and retrieve the corresponding text chunks
for top_index in top_indices:
top_chunks.append(text_chunks[top_index])
# Return the list of most relevant chunks
return top_chunks
# STEP 6 FROM SEMANTIC SEARCH
# Call the get_top_chunks function with the original query
top_results = get_top_chunks("How do you make banana bread?", chunk_embeddings, cleaned_chunks)
# Print the top results
print(top_results)
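# Sanity check: even an off-topic query like the one above still returns 3
# chunks, because top-k ranks by relative similarity and always returns k
# results.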
client = InferenceClient("Qwen/Qwen2.5-72B-Instruct")
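# The client sends chat_completion requests to the hosted Qwen model via the
# Hugging Face Inference API; when authentication is required, the token is
# picked up from the environment (e.g., an HF_TOKEN secret on a Space).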
def respond(message, history):
top_results = get_top_chunks(message, chunk_embeddings, cleaned_chunks)
print(top_results)
    str_top_results = "\n".join(top_results)
    messages = [{"role": "system", "content": f"You're a friendly Gen Z chatbot. Base your response on the provided context: {str_top_results}."}]
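    # With type='messages' (set on the ChatInterface below), Gradio passes
    # history as a list of {"role": ..., "content": ...} dicts, so it can be
    # spliced directly into the prompt.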
if history:
messages.extend(history)
messages.append({"role": "user", "content": message})
    response = client.chat_completion(
        messages,
        max_tokens=1000,
        temperature=1,
    )
    # chat_completion returns a ChatCompletionOutput; read the reply via
    # attribute access
    return response.choices[0].message.content.strip()
chatbot = gr.ChatInterface(respond, type='messages')
chatbot.launch(debug=True)
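# launch(debug=True) surfaces server errors in the console and blocks the
# main thread until the app is stopped, which is handy while developing the
# Space.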