# Hugging Face Space status header captured by the page scrape ("Spaces: Sleeping");
# kept as a comment so the file remains valid Python.
import gradio as gr
import requests
import torch
from transformers import AutoTokenizer, AutoModelForSequenceClassification

# Single source of truth for the checkpoint name (was duplicated on two lines).
MODEL_NAME = "typeform/distilbert-base-uncased-mnli"

# Load the MNLI-finetuned DistilBERT once at import time so every request
# reuses the same tokenizer/model pair instead of reloading them.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForSequenceClassification.from_pretrained(MODEL_NAME)
model.eval()  # inference only — disables dropout for deterministic scores

# Index -> label for the model's 3 output logits.
# NOTE(review): order assumed to match this checkpoint's id2label
# (0=entailment, 1=neutral, 2=contradiction) — confirm against the model config.
label_mapping = ["entailment", "neutral", "contradiction"]
def check_entailment(premise: str, hypothesis: str) -> dict:
    """
    Computes the entailment scores between a premise and a hypothesis.

    Call this function several times for each RAG similarity.

    Args:
        premise (str): The reference text.
        hypothesis (str): The statement to check.

    Returns:
        dict: A dictionary containing scores for entailment, neutral,
            and contradiction (softmax probabilities summing to 1.0).
    """
    # truncation=True keeps long premise/hypothesis pairs within the model's
    # maximum sequence length instead of raising at inference time.
    inputs = tokenizer(premise, hypothesis, return_tensors="pt", truncation=True)
    # No gradients needed for inference; saves memory and time.
    with torch.no_grad():
        logits = model(**inputs).logits
    # Softmax over the class dimension turns the 3 logits into probabilities;
    # [0] selects the single example in the batch.
    probabilities = torch.nn.functional.softmax(logits, dim=-1)[0]
    # Pair each probability with its human-readable label.
    return {label: probabilities[i].item() for i, label in enumerate(label_mapping)}
# Minimal two-textbox UI around check_entailment; scores rendered as JSON.
demo = gr.Interface(
    fn=check_entailment,
    inputs=["text", "text"],
    outputs=["json"],
)

# share=True asks Gradio for a public *.gradio.live tunnel URL.
demo.launch(share=True)

# NOTE(review): when run as a script, launch() blocks the main thread while
# serving, so the two lines below only execute after the server shuts down.
# If the URL is needed at startup, pass prevent_thread_lock=True (and call
# demo.block_thread() afterwards) or read the URL Gradio prints on launch.
api_url = demo.share_url
print(api_url)