pcuenq HF Staff committed on
Commit
0e34dde
·
verified ·
1 Parent(s): b6f4106

update gradio (#2)

Browse files

- Update for latest gradio, instantiate model once (e746a51fc506ad7c39ad2f86a73dbc94e90e23c2)
- Update requirements (386822cad5a502f46655cef798e1e294eea99635)

Files changed (2) hide show
  1. app.py +8 -9
  2. requirements.txt +3 -3
app.py CHANGED
@@ -1,21 +1,20 @@
1
  import gradio as gr
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
 
 
 
 
 
4
  def translate(text):
5
- model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
6
- model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
7
- tokenizer = AutoTokenizer.from_pretrained(model_name)
8
-
9
  input = tokenizer(text, return_tensors="pt")
10
  output = model.generate(input["input_ids"], max_length=40, num_beams=4, early_stopping=True)
11
-
12
  return tokenizer.decode(output[0], skip_special_tokens=True)
13
 
14
  title = "Spanish to Quechua translation 🦙"
15
- inputs = gr.inputs.Textbox(lines=1, label="Text in Spanish")
16
- outputs = [gr.outputs.Textbox(label="Translated text in Quechua")]
17
 
18
- description = "Here use the [t5-small-finetuned-spanish-to-quechua-model](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) that was trained with [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
19
 
20
  article = '''
21
  ## Challenges
@@ -35,4 +34,4 @@ examples=[
35
  ]
36
 
37
  iface = gr.Interface(fn=translate, inputs=inputs, outputs=outputs, theme="grass", css="styles.css", examples=examples, title=title, description=description, article=article)
38
- iface.launch(enable_queue=True)
 
1
  import gradio as gr
2
  from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
3
 
4
+ model_name = 'hackathon-pln-es/t5-small-finetuned-spanish-to-quechua'
5
+ model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
6
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
7
+
8
  def translate(text):
 
 
 
 
9
  input = tokenizer(text, return_tensors="pt")
10
  output = model.generate(input["input_ids"], max_length=40, num_beams=4, early_stopping=True)
 
11
  return tokenizer.decode(output[0], skip_special_tokens=True)
12
 
13
  title = "Spanish to Quechua translation 🦙"
14
+ inputs = gr.Textbox(lines=1, label="Text in Spanish")
15
+ outputs = [gr.Textbox(label="Translated text in Quechua")]
16
 
17
+ description = "Here we use the [t5-small-finetuned-spanish-to-quechua-model](https://huggingface.co/hackathon-pln-es/t5-small-finetuned-spanish-to-quechua) that was trained with [spanish-to-quechua dataset](https://huggingface.co/datasets/hackathon-pln-es/spanish-to-quechua)."
18
 
19
  article = '''
20
  ## Challenges
 
34
  ]
35
 
36
  iface = gr.Interface(fn=translate, inputs=inputs, outputs=outputs, theme="grass", css="styles.css", examples=examples, title=title, description=description, article=article)
37
+ iface.queue().launch()
requirements.txt CHANGED
@@ -1,3 +1,3 @@
1
- gradio
2
- transformers
3
- torch
 
1
+ gradio>=5.35.0
2
+ transformers>=4.53.1
3
+ torch