Update app.py

app.py CHANGED
@@ -4,21 +4,25 @@ from transformers import AutoModelForCausalLM, AutoTokenizer
 import torch
 import gradio as gr
 
-#
+# Get token from environment (secure via Repository Secrets in HF Space)
 hf_token = os.getenv("HF_TOKEN")
 if not hf_token:
     raise ValueError("Please set your HF_TOKEN environment variable securely.")
 
+# Login to Hugging Face Hub
 login(token=hf_token)
 
+# Model and device setup
 model_name = "mistralai/Mistral-7B-Instruct-v0.2"
 device = "cuda" if torch.cuda.is_available() else "cpu"
 print(f"Using device: {device}")
 
+# Load tokenizer
 tokenizer = AutoTokenizer.from_pretrained(model_name, token=hf_token)
 if tokenizer.pad_token is None:
     tokenizer.pad_token = tokenizer.eos_token
 
+# Load model
 model = AutoModelForCausalLM.from_pretrained(
     model_name,
     device_map="auto" if torch.cuda.is_available() else None,
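If the Space's GPU cannot hold Mistral-7B at full precision, a quantized load is a common alternative to the `from_pretrained` call above. A minimal sketch, not part of this commit, assuming the `bitsandbytes` package is installed:

# Sketch only: 4-bit quantized load (assumes `bitsandbytes` is installed).
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                     # store weights in 4 bits
    bnb_4bit_compute_dtype=torch.float16,  # run compute in fp16
)
model = AutoModelForCausalLM.from_pretrained(
    "mistralai/Mistral-7B-Instruct-v0.2",
    quantization_config=bnb_config,
    device_map="auto",
)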
@@ -30,6 +34,7 @@ model = AutoModelForCausalLM.from_pretrained(
 if not torch.cuda.is_available():
     model = model.to(device)
 
+# Generation function
 def generate_text(prompt):
     if not prompt.strip():
         return "Please enter a valid prompt."
@@ -41,7 +46,14 @@ def generate_text(prompt):
             tokenize=False,
             add_generation_prompt=True
         )
-
+
+        inputs = tokenizer(
+            formatted_prompt,
+            return_tensors="pt",
+            padding=True,
+            truncation=True,
+            max_length=2048
+        )
         inputs = {k: v.to(model.device) for k, v in inputs.items()}
 
         with torch.no_grad():
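The `formatted_prompt` handed to the new tokenizer call is produced by `tokenizer.apply_chat_template(...)` just above this hunk. A standalone way to inspect what that template emits, as a sketch (the exact string is defined by the model's chat template):

# Sketch: print the prompt string the chat template produces.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
messages = [{"role": "user", "content": "Hello!"}]
prompt = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)  # roughly "<s>[INST] Hello! [/INST]" for this model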
@@ -54,7 +66,7 @@ def generate_text(prompt):
             pad_token_id=tokenizer.eos_token_id,
             eos_token_id=tokenizer.eos_token_id
         )
-
+
         response = tokenizer.decode(outputs[0], skip_special_tokens=True)
         generated_text = response[len(formatted_prompt):].strip()
         return generated_text
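One caveat in the unchanged lines above: `response` is decoded with `skip_special_tokens=True`, so any special tokens contained in `formatted_prompt` are absent from `response`, and the character-based `len(formatted_prompt)` offset can drift. A more robust variant, sketched here rather than taken from the commit, slices by token count instead:

# Sketch: strip the prompt by token count instead of character offset.
prompt_len = inputs["input_ids"].shape[1]  # number of prompt tokens
generated_ids = outputs[0][prompt_len:]    # keep only newly generated tokens
generated_text = tokenizer.decode(generated_ids, skip_special_tokens=True).strip()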
@@ -62,10 +74,18 @@ def generate_text(prompt):
     except Exception as e:
         return f"Error generating text: {str(e)}"
 
+# Gradio interface
 iface = gr.Interface(
     fn=generate_text,
-    inputs=gr.Textbox(
-
+    inputs=gr.Textbox(
+        lines=3,
+        placeholder="Enter your prompt here...",
+        label="Your Message"
+    ),
+    outputs=gr.Textbox(
+        label="Mistral Response",
+        lines=5
+    ),
     title="🤖 Mistral-7B Chat",
     description="Chat with Mistral-7B-Instruct model. Enter your message and get AI-generated responses.",
     examples=[
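The entries of `examples=[...]` fall outside this hunk's context. For a single-Textbox input, `gr.Interface` expects a flat list of strings; the values below are placeholders, not the ones in the file:

# Sketch: expected shape of the examples list (placeholder values).
example_prompts = [
    "Explain quantum computing in simple terms",
    "Write a short poem about the sea",
]
# Passed as gr.Interface(..., examples=example_prompts, cache_examples=False)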
@@ -77,5 +97,10 @@ iface = gr.Interface(
     cache_examples=False
 )
 
+# Run app
 if __name__ == "__main__":
-    iface.launch()
+    iface.launch(
+        server_name="0.0.0.0",
+        server_port=7860,
+        share=False
+    )
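With the Space running, the same interface can be exercised programmatically. A minimal sketch using `gradio_client`, where the Space id is a placeholder:

# Sketch: query the deployed Gradio app remotely (Space id is hypothetical).
from gradio_client import Client

client = Client("your-username/mistral-7b-chat")  # placeholder Space id
reply = client.predict("Tell me about Mistral-7B", api_name="/predict")
print(reply)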