ahmedheakl commited on
Commit
34a1cb8
·
verified ·
1 Parent(s): 6fc4524

Upload folder using huggingface_hub

Browse files
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
README.md CHANGED
@@ -1,12 +1,14 @@
1
  ---
2
- title: AIN Arabic VLM
3
- emoji: 🏢
4
- colorFrom: blue
5
- colorTo: gray
6
  sdk: gradio
7
- sdk_version: 5.13.1
8
- app_file: app.py
 
 
9
  pinned: false
10
- ---
11
-
12
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
1
  ---
2
+ title: AIN-Arabic-VLM
3
+ app_file: gradio_app.py
 
 
4
  sdk: gradio
5
+ sdk_version: 5.13.0
6
+ license: mit
7
+ colorFrom: blue
8
+ colorTo: red
9
  pinned: false
10
+ emoji: 🌖
11
+ thumbnail: >-
12
+ https://cdn-uploads.huggingface.co/production/uploads/656864e12d73834278a8dea7/pNXWle55Sbsg7xjdYijzC.jpeg
13
+ short_description: The best Arabic-English VLM developed by MBZUAI.
14
+ ---
__pycache__/app.cpython-311.pyc ADDED
Binary file (3.87 kB). View file
 
__pycache__/chat_interface.cpython-311.pyc ADDED
Binary file (57.4 kB). View file
 
app.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

from openai import OpenAI
from flask import Flask, request
import requests
from loguru import logger

app = Flask(__name__)

# SECURITY: the bot token was previously hard-coded here, so it is leaked in
# version control and must be rotated via @BotFather. Prefer supplying it
# through the BOT_TOKEN environment variable; the old literal is kept only as
# a fallback so existing deployments keep working unchanged.
BOT_TOKEN = os.getenv("BOT_TOKEN", "8003439194:AAGQNjq3dQSZi4Ztv2Z0yyZx1ZnD0L5MR5o")
TELEGRAM_API_URL = f"https://api.telegram.org/bot{BOT_TOKEN}"

# OpenAI-compatible client pointing at a self-hosted VLM endpoint.
# NOTE(review): free-tier ngrok URLs are ephemeral — override via env when the
# tunnel changes instead of editing source.
client = OpenAI(
    api_key=os.getenv("OPENAI_API_KEY", "0"),
    base_url=os.getenv("OPENAI_BASE_URL", "https://0f21-5-195-0-150.ngrok-free.app/v1"),
)
16
@app.route("/test")
def test():
    """Liveness probe: confirms the Flask app is up and routable."""
    greeting = "Hello, World!"
    return greeting
19
+
20
DEFAULT_CAPTION = "Please provide a caption for the image."


@app.route(f"/{BOT_TOKEN}", methods=["POST"])
def telegram_webhook():
    """Telegram webhook: forwards text and/or photo updates to the VLM backend.

    Always answers 200 "ok" — returning an error status makes Telegram
    re-deliver the same update indefinitely.
    """
    data = request.json
    logger.debug(f"Received message: {data}")

    # Updates such as edited_message/channel_post/callback_query carry no
    # "message" key; previously this raised KeyError (HTTP 500) and caused
    # endless webhook retries by Telegram.
    message = data.get("message") if isinstance(data, dict) else None
    if message is None:
        logger.warning("Update without a 'message' field; ignoring.")
        return "ok", 200

    chat_id = message["chat"]["id"]
    messages: list[dict] = [{
        "role": "user",
        "content": []
    }]

    if "text" in message:
        messages[0]["content"].append({"type": "text", "text": message["text"]})

    if "photo" in message:
        try:
            # The last entry of "photo" is the highest-resolution size.
            file_id = message["photo"][-1]["file_id"]
            file_info = requests.get(
                f"{TELEGRAM_API_URL}/getFile",
                params={"file_id": file_id},
                timeout=30,  # never hang the webhook worker on a slow API call
            ).json()
            file_path = file_info["result"]["file_path"]
        except Exception as e:
            logger.error(f"Could not resolve photo file: {e}")
            send_message(chat_id, "There was an error processing your request.")
            return "ok", 200
        image_url = f"https://api.telegram.org/file/bot{BOT_TOKEN}/{file_path}"
        messages[0]["content"].append({
            "type": "image_url",
            "image_url": {"url": image_url},
        })
        # An image always needs an accompanying text prompt for the VLM.
        caption = message.get("caption", DEFAULT_CAPTION)
        messages[0]["content"].append({"type": "text", "text": caption})

    # Content is non-empty iff the update contained text and/or a photo.
    if not messages[0]["content"]:
        send_message(chat_id, "Unsupported message type. Please send text or an image.")
        return "ok", 200

    try:
        logger.debug(f"Sending message to OpenAI: {messages}")
        result = client.chat.completions.create(messages=messages, model="test")
        # content may legitimately be None (e.g. tool-call responses); Telegram
        # rejects empty text, so fall back to an empty string guardedly.
        response_text = result.choices[0].message.content or ""
    except Exception as e:
        logger.error(f"Error processing message: {e}")
        response_text = "There was an error processing your request."

    send_message(chat_id, response_text)
    return "ok", 200
62
+
63
+
64
def send_message(chat_id, text):
    """Best-effort delivery of `text` to `chat_id` via the Telegram Bot API.

    Failures are logged rather than raised so the webhook handler can always
    acknowledge the update to Telegram.
    """
    url = f"{TELEGRAM_API_URL}/sendMessage"
    payload = {"chat_id": chat_id, "text": text}
    try:
        # timeout added: an unbounded POST could hang the Flask worker forever
        requests.post(url, json=payload, timeout=30)
    except requests.RequestException as e:
        logger.error(f"Failed to send message to chat {chat_id}: {e}")


if __name__ == "__main__":
    app.run(port=5000)
call.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from openai import OpenAI


def main():
    """Smoke-test the OpenAI-compatible VLM endpoint with a single image prompt."""
    client = OpenAI(
        api_key="0",
        base_url="https://0f21-5-195-0-150.ngrok-free.app/v1",
    )
    user_turn = {
        "role": "user",
        "content": [
            {"type": "text", "text": "Output the color and number of each box."},
            {
                "type": "image_url",
                "image_url": {"url": "https://qianwen-res.oss-cn-beijing.aliyuncs.com/Qwen2-VL/boxes.png"},
            },
        ],
    }
    messages = [user_turn]
    result = client.chat.completions.create(messages=messages, model="test")
    reply = result.choices[0].message
    # Keep the assistant turn in the history (matches the original's bookkeeping).
    messages.append(reply)
    print(reply)


if __name__ == "__main__":
    main()
chat_interface.py ADDED
@@ -0,0 +1,1087 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ This file defines a useful high-level abstraction to build Gradio chatbots: ChatInterface.
3
+ """
4
+
5
+ from __future__ import annotations
6
+
7
+ import builtins
8
+ import copy
9
+ import dataclasses
10
+ import inspect
11
+ import os
12
+ import warnings
13
+ from collections.abc import AsyncGenerator, Callable, Generator, Sequence
14
+ from pathlib import Path
15
+ from typing import Literal, Union, cast
16
+
17
+ import anyio
18
+ from gradio_client.documentation import document
19
+
20
+ from gradio import utils
21
+ from gradio.blocks import Blocks
22
+ from gradio.components import (
23
+ JSON,
24
+ BrowserState,
25
+ Button,
26
+ Chatbot,
27
+ Component,
28
+ Dataset,
29
+ Markdown,
30
+ MultimodalTextbox,
31
+ State,
32
+ Textbox,
33
+ get_component_instance,
34
+ )
35
+ from gradio.components.chatbot import (
36
+ ChatMessage,
37
+ ExampleMessage,
38
+ Message,
39
+ MessageDict,
40
+ TupleFormat,
41
+ )
42
+ from gradio.components.multimodal_textbox import MultimodalPostprocess, MultimodalValue
43
+ from gradio.context import get_blocks_context
44
+ from gradio.events import Dependency, EditData, SelectData
45
+ from gradio.flagging import ChatCSVLogger
46
+ from gradio.helpers import create_examples as Examples # noqa: N812
47
+ from gradio.helpers import special_args, update
48
+ from gradio.layouts import Accordion, Column, Group, Row
49
+ from gradio.routes import Request
50
+ from gradio.themes import ThemeClass as Theme
51
+
52
+
53
+ @document()
54
+ class ChatInterface(Blocks):
55
+ """
56
+ ChatInterface is Gradio's high-level abstraction for creating chatbot UIs, and allows you to create
57
+ a web-based demo around a chatbot model in a few lines of code. Only one parameter is required: fn, which
58
+ takes a function that governs the response of the chatbot based on the user input and chat history. Additional
59
+ parameters can be used to control the appearance and behavior of the demo.
60
+
61
+ Example:
62
+ import gradio as gr
63
+
64
+ def echo(message, history):
65
+ return message
66
+
67
+ demo = gr.ChatInterface(fn=echo, type="messages", examples=[{"text": "hello"}, {"text": "hola"}, {"text": "merhaba"}], title="Echo Bot")
68
+ demo.launch()
69
+ Demos: chatinterface_random_response, chatinterface_streaming_echo, chatinterface_artifacts
70
+ Guides: creating-a-chatbot-fast, sharing-your-app
71
+ """
72
+
73
    def __init__(
        self,
        fn: Callable,
        *,
        multimodal: bool = False,
        type: Literal["messages", "tuples"] | None = None,
        chatbot: Chatbot | None = None,
        textbox: Textbox | MultimodalTextbox | None = None,
        additional_inputs: str | Component | list[str | Component] | None = None,
        additional_inputs_accordion: str | Accordion | None = None,
        additional_outputs: Component | list[Component] | None = None,
        editable: bool = False,
        examples: list[str] | list[MultimodalValue] | list[list] | None = None,
        example_labels: list[str] | None = None,
        example_icons: list[str] | None = None,
        run_examples_on_click: bool = True,
        cache_examples: bool | None = None,
        cache_mode: Literal["eager", "lazy"] | None = None,
        title: str | None = None,
        description: str | None = None,
        theme: Theme | str | None = None,
        flagging_mode: Literal["never", "manual"] | None = None,
        flagging_options: list[str] | tuple[str, ...] | None = ("Like", "Dislike"),
        flagging_dir: str = ".gradio/flagged",
        css: str | None = None,
        css_paths: str | Path | Sequence[str | Path] | None = None,
        js: str | None = None,
        head: str | None = None,
        head_paths: str | Path | Sequence[str | Path] | None = None,
        analytics_enabled: bool | None = None,
        autofocus: bool = True,
        autoscroll: bool = True,
        submit_btn: str | bool | None = True,
        stop_btn: str | bool | None = True,
        concurrency_limit: int | None | Literal["default"] = "default",
        delete_cache: tuple[int, int] | None = None,
        show_progress: Literal["full", "minimal", "hidden"] = "minimal",
        fill_height: bool = True,
        fill_width: bool = False,
        api_name: str | Literal[False] = "chat",
        save_history: bool = False,
        logo: str | None = None,
    ):
        """
        Parameters:
            fn: the function to wrap the chat interface around. Normally (assuming `type` is set to "messages"), the function should accept two parameters: a `str` representing the input message and `list` of openai-style dictionaries: {"role": "user" | "assistant", "content": `str` | {"path": `str`} | `gr.Component`} representing the chat history. The function should return/yield a `str` (for a simple message), a supported Gradio component (e.g. gr.Image to return an image), a `dict` (for a complete openai-style message response), or a `list` of such messages.
            multimodal: if True, the chat interface will use a `gr.MultimodalTextbox` component for the input, which allows for the uploading of multimedia files. If False, the chat interface will use a gr.Textbox component for the input. If this is True, the first argument of `fn` should accept not a `str` message but a `dict` message with keys "text" and "files"
            type: The format of the messages passed into the chat history parameter of `fn`. If "messages", passes the history as a list of dictionaries with openai-style "role" and "content" keys. The "content" key's value should be one of the following - (1) strings in valid Markdown (2) a dictionary with a "path" key and value corresponding to the file to display or (3) an instance of a Gradio component: at the moment gr.Image, gr.Plot, gr.Video, gr.Gallery, gr.Audio, and gr.HTML are supported. The "role" key should be one of 'user' or 'assistant'. Any other roles will not be displayed in the output. If this parameter is 'tuples' (deprecated), passes the chat history as a `list[list[str | None | tuple]]`, i.e. a list of lists. The inner list should have 2 elements: the user message and the response message.
            chatbot: an instance of the gr.Chatbot component to use for the chat interface, if you would like to customize the chatbot properties. If not provided, a default gr.Chatbot component will be created.
            textbox: an instance of the gr.Textbox or gr.MultimodalTextbox component to use for the chat interface, if you would like to customize the textbox properties. If not provided, a default gr.Textbox or gr.MultimodalTextbox component will be created.
            editable: if True, users can edit past messages to regenerate responses.
            additional_inputs: an instance or list of instances of gradio components (or their string shortcuts) to use as additional inputs to the chatbot. If the components are not already rendered in a surrounding Blocks, then the components will be displayed under the chatbot, in an accordion. The values of these components will be passed into `fn` as arguments in order after the chat history.
            additional_inputs_accordion: if a string is provided, this is the label of the `gr.Accordion` to use to contain additional inputs. A `gr.Accordion` object can be provided as well to configure other properties of the container holding the additional inputs. Defaults to a `gr.Accordion(label="Additional Inputs", open=False)`. This parameter is only used if `additional_inputs` is provided.
            additional_outputs: an instance or list of instances of gradio components to use as additional outputs from the chat function. These must be components that are already defined in the same Blocks scope. If provided, the chat function should return additional values for these components. See $demo/chatinterface_artifacts.
            examples: sample inputs for the function; if provided, appear within the chatbot and can be clicked to populate the chatbot input. Should be a list of strings representing text-only examples, or a list of dictionaries (with keys `text` and `files`) representing multimodal examples. If `additional_inputs` are provided, the examples must be a list of lists, where the first element of each inner list is the string or dictionary example message and the remaining elements are the example values for the additional inputs -- in this case, the examples will appear under the chatbot.
            example_labels: labels for the examples, to be displayed instead of the examples themselves. If provided, should be a list of strings with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided).
            example_icons: icons for the examples, to be displayed above the examples. If provided, should be a list of string URLs or local paths with the same length as the examples list. Only applies when examples are displayed within the chatbot (i.e. when `additional_inputs` is not provided).
            cache_examples: if True, caches examples in the server for fast runtime in examples. The default option in HuggingFace Spaces is True. The default option elsewhere is False.
            cache_mode: if "eager", all examples are cached at app launch. If "lazy", examples are cached for all users after the first use by any user of the app. If None, will use the GRADIO_CACHE_MODE environment variable if defined, or default to "eager".
            run_examples_on_click: if True, clicking on an example will run the example through the chatbot fn and the response will be displayed in the chatbot. If False, clicking on an example will only populate the chatbot input with the example message. Has no effect if `cache_examples` is True
            title: a title for the interface; if provided, appears above chatbot in large font. Also used as the tab title when opened in a browser window.
            description: a description for the interface; if provided, appears above the chatbot and beneath the title in regular font. Accepts Markdown and HTML content.
            theme: a Theme object or a string representing a theme. If a string, will look for a built-in theme with that name (e.g. "soft" or "default"), or will attempt to load a theme from the Hugging Face Hub (e.g. "gradio/monochrome"). If None, will use the Default theme.
            flagging_mode: one of "never", "manual". If "never", users will not see a button to flag an input and output. If "manual", users will see a button to flag.
            flagging_options: a list of strings representing the options that users can choose from when flagging a message. Defaults to ["Like", "Dislike"]. These two case-sensitive strings will render as "thumbs up" and "thumbs down" icon respectively next to each bot message, but any other strings appear under a separate flag icon.
            flagging_dir: path to the directory where flagged data is stored. If the directory does not exist, it will be created.
            css: Custom css as a code string. This css will be included in the demo webpage.
            css_paths: Custom css as a pathlib.Path to a css file or a list of such paths. This css files will be read, concatenated, and included in the demo webpage. If the `css` parameter is also set, the css from `css` will be included first.
            js: Custom js as a code string. The custom js should be in the form of a single js function. This function will automatically be executed when the page loads. For more flexibility, use the head parameter to insert js inside <script> tags.
            head: Custom html code to insert into the head of the demo webpage. This can be used to add custom meta tags, multiple scripts, stylesheets, etc. to the page.
            head_paths: Custom html code as a pathlib.Path to a html file or a list of such paths. This html files will be read, concatenated, and included in the head of the demo webpage. If the `head` parameter is also set, the html from `head` will be included first.
            analytics_enabled: whether to allow basic telemetry. If None, will use GRADIO_ANALYTICS_ENABLED environment variable if defined, or default to True.
            autofocus: if True, autofocuses to the textbox when the page loads.
            autoscroll: If True, will automatically scroll to the bottom of the chatbot when a new message appears, unless the user scrolls up. If False, will not scroll to the bottom of the chatbot automatically.
            submit_btn: If True, will show a submit button with a submit icon within the textbox. If a string, will use that string as the submit button text in place of the icon. If False, will not show a submit button.
            stop_btn: If True, will show a button with a stop icon during generator executions, to stop generating. If a string, will use that string as the submit button text in place of the stop icon. If False, will not show a stop button.
            concurrency_limit: if set, this is the maximum number of chatbot submissions that can be running simultaneously. Can be set to None to mean no limit (any number of chatbot submissions can be running simultaneously). Set to "default" to use the default concurrency limit (defined by the `default_concurrency_limit` parameter in `.queue()`, which is 1 by default).
            delete_cache: a tuple corresponding [frequency, age] both expressed in number of seconds. Every `frequency` seconds, the temporary files created by this Blocks instance will be deleted if more than `age` seconds have passed since the file was created. For example, setting this to (86400, 86400) will delete temporary files every day. The cache will be deleted entirely when the server restarts. If None, no cache deletion will occur.
            show_progress: how to show the progress animation while event is running: "full" shows a spinner which covers the output component area as well as a runtime display in the upper right corner, "minimal" only shows the runtime display, "hidden" shows no progress animation at all
            fill_height: if True, the chat interface will expand to the height of window.
            fill_width: Whether to horizontally expand to fill container fully. If False, centers and constrains app to a maximum width.
            api_name: the name of the API endpoint to use for the chat interface. Defaults to "chat". Set to False to disable the API endpoint.
            save_history: if True, will save the chat history to the browser's local storage and display previous conversations in a side panel.
            logo: image URL or local path rendered centered above the title by `_render_header`. (NOTE(review): this parameter was previously undocumented.)
        """
        super().__init__(
            analytics_enabled=analytics_enabled,
            mode="chat_interface",
            title=title or "Gradio",
            theme=theme,
            css=css,
            css_paths=css_paths,
            js=js,
            head=head,
            head_paths=head_paths,
            fill_height=fill_height,
            fill_width=fill_width,
            delete_cache=delete_cache,
        )
        self.api_name = api_name
        self.type = type
        self.multimodal = multimodal
        self.concurrency_limit = concurrency_limit
        # Unwrap a nested ChatInterface so `self.fn` is always the raw callable.
        if isinstance(fn, ChatInterface):
            self.fn = fn.fn
        else:
            self.fn = fn
        # An async generator counts as both async and a generator.
        self.is_async = inspect.iscoroutinefunction(
            self.fn
        ) or inspect.isasyncgenfunction(self.fn)
        self.is_generator = inspect.isgeneratorfunction(
            self.fn
        ) or inspect.isasyncgenfunction(self.fn)
        self.provided_chatbot = chatbot is not None
        self.examples = examples
        self.examples_messages = self._setup_example_messages(
            examples, example_labels, example_icons
        )
        self.run_examples_on_click = run_examples_on_click
        self.cache_examples = cache_examples
        self.cache_mode = cache_mode
        self.editable = editable
        self.fill_height = fill_height
        self.autoscroll = autoscroll
        self.autofocus = autofocus
        self.title = title
        self.description = description
        self.logo = logo
        self.show_progress = show_progress
        # History persistence relies on the openai-style "messages" format.
        if save_history and not type == "messages":
            raise ValueError("save_history is only supported for type='messages'")
        self.save_history = save_history
        self.additional_inputs = [
            get_component_instance(i)
            for i in utils.none_or_singleton_to_list(additional_inputs)
        ]
        self.additional_outputs = utils.none_or_singleton_to_list(additional_outputs)
        # Normalize `additional_inputs_accordion` (None | str | Accordion) into
        # a kwargs dict used later when rendering the accordion.
        if additional_inputs_accordion is None:
            self.additional_inputs_accordion_params = {
                "label": "Additional Inputs",
                "open": False,
            }
        elif isinstance(additional_inputs_accordion, str):
            self.additional_inputs_accordion_params = {
                "label": additional_inputs_accordion
            }
        elif isinstance(additional_inputs_accordion, Accordion):
            self.additional_inputs_accordion_params = (
                additional_inputs_accordion.recover_kwargs(
                    additional_inputs_accordion.get_config()
                )
            )
        else:
            raise ValueError(
                f"The `additional_inputs_accordion` parameter must be a string or gr.Accordion, not {builtins.type(additional_inputs_accordion)}"
            )
        # True when any example supplies a non-None value for an additional
        # input (position > 0); this switches examples to the footer layout.
        self._additional_inputs_in_examples = False
        if self.additional_inputs and self.examples is not None:
            for example in self.examples:
                if not isinstance(example, list):
                    raise ValueError(
                        "Examples must be a list of lists when additional inputs are provided."
                    )
                for idx, example_for_input in enumerate(example):
                    if example_for_input is not None and idx > 0:
                        self._additional_inputs_in_examples = True
                        break
                if self._additional_inputs_in_examples:
                    break

        if flagging_mode is None:
            flagging_mode = os.getenv("GRADIO_CHAT_FLAGGING_MODE", "never")  # type: ignore
        if flagging_mode in ["manual", "never"]:
            self.flagging_mode = flagging_mode
        else:
            raise ValueError(
                "Invalid value for `flagging_mode` parameter."
                "Must be: 'manual' or 'never'."
            )
        self.flagging_options = flagging_options
        self.flagging_dir = flagging_dir

        # Build the UI inside this Blocks context, then wire events.
        with self:
            self.saved_conversations = BrowserState(
                [], storage_key=f"_saved_conversations_{self._id}"
            )
            self.conversation_id = State(None)
            self.saved_input = State()  # Stores the most recent user message
            self.null_component = State()  # Used to discard unneeded values

            with Column():
                self._render_header()
                if self.save_history:
                    # Sidebar layout: history panel on the left, chat on the right.
                    with Row(scale=1):
                        self._render_history_area()
                        with Column(scale=6):
                            self._render_chatbot_area(
                                chatbot, textbox, submit_btn, stop_btn
                            )
                            self._render_footer()
                else:
                    self._render_chatbot_area(chatbot, textbox, submit_btn, stop_btn)
                    self._render_footer()

            self._setup_events()
277
+
278
    def _render_header(self):
        """Render the optional logo, title, and description above the chatbot."""
        if self.logo:
            # Centered logo image with a fixed 100px height.
            Markdown(f"""\
<p align="center"><img src="{self.logo}" style="height: 100px"/><p>""")
        if self.title:
            Markdown(f"""<center><font size=8>{self.title}</center>""")
        if self.description:
            # Description accepts Markdown/HTML; rendered centered under the title.
            Markdown(
                f"""\
<center><font size=3>{self.description}</center>""")
288
+
289
    def _render_history_area(self):
        """Render the saved-conversations sidebar: a "New chat" button plus the
        clickable list of previous conversations (only built when
        `save_history` is enabled)."""
        with Column(scale=1, min_width=100):
            self.new_chat_button = Button(
                "New chat",
                variant="primary",
                size="md",
                icon=utils.get_icon_path("plus.svg"),
                # scale=0,
            )
            # One row per saved conversation; type="index" means selecting a row
            # yields its integer position rather than its contents.
            self.chat_history_dataset = Dataset(
                components=[Textbox(visible=False)],
                show_label=False,
                layout="table",
                type="index",
            )
304
+
305
    def _render_chatbot_area(
        self,
        chatbot: Chatbot | None,
        textbox: Textbox | MultimodalTextbox | None,
        submit_btn: str | bool | None,
        stop_btn: str | bool | None,
    ):
        """Render the Chatbot plus the input textbox, and create the hidden
        API/state components the event wiring relies on."""
        if chatbot:
            # A user-supplied Chatbot: reconcile its message `type` with the
            # interface's; the ChatInterface's type wins when both are set.
            if self.type:
                if self.type != chatbot.type:
                    warnings.warn(
                        "The type of the gr.Chatbot does not match the type of the gr.ChatInterface."
                        f"The type of the gr.ChatInterface, '{self.type}', will be used."
                    )
                    chatbot.type = cast(Literal["messages", "tuples"], self.type)
                    chatbot._setup_data_model()
            else:
                warnings.warn(
                    f"The gr.ChatInterface was not provided with a type, so the type of the gr.Chatbot, '{chatbot.type}', will be used."
                )
                self.type = chatbot.type
            self.chatbot = cast(Chatbot, get_component_instance(chatbot, render=True))
            if self.chatbot.examples and self.examples_messages:
                warnings.warn(
                    "The ChatInterface already has examples set. The examples provided in the chatbot will be ignored."
                )
            # In-chatbot examples are suppressed when examples also carry
            # additional-input values (those render in the footer instead).
            self.chatbot.examples = (
                self.examples_messages
                if not self._additional_inputs_in_examples
                else None
            )
            self.chatbot._setup_examples()
        else:
            # No chatbot supplied: build a default one.
            self.type = self.type or "tuples"
            self.chatbot = Chatbot(
                label="Chatbot",
                scale=1,
                height=400 if self.fill_height else None,
                type=cast(Literal["messages", "tuples"], self.type),
                autoscroll=self.autoscroll,
                examples=self.examples_messages
                if not self._additional_inputs_in_examples
                else None,
            )
        with Group():
            with Row():
                if textbox:
                    textbox.show_label = False
                    textbox_ = get_component_instance(textbox, render=True)
                    if not isinstance(textbox_, (Textbox, MultimodalTextbox)):
                        raise TypeError(
                            f"Expected a gr.Textbox or gr.MultimodalTextbox component, but got {builtins.type(textbox_)}"
                        )
                    self.textbox = textbox_
                else:
                    # Multimodal interfaces get a file-accepting textbox.
                    textbox_component = (
                        MultimodalTextbox if self.multimodal else Textbox
                    )
                    self.textbox = textbox_component(
                        show_label=False,
                        label="Message",
                        placeholder="Type a message...",
                        scale=7,
                        autofocus=self.autofocus,
                        submit_btn=submit_btn,
                        stop_btn=stop_btn,
                    )

        # Hide the stop button at the beginning, and show it with the given value during the generator execution.
        self.original_stop_btn = self.textbox.stop_btn
        self.textbox.stop_btn = False
        self.fake_api_btn = Button("Fake API", visible=False)
        self.api_response = JSON(
            label="Response", visible=False
        )  # Used to store the response from the API call

        # Used internally to store the chatbot value when it differs from the value displayed in the chatbot UI.
        # For example, when a user submits a message, the chatbot UI is immediately updated with the user message,
        # but the chatbot_state value is not updated until the submit_fn is called.
        self.chatbot_state = State(self.chatbot.value if self.chatbot.value else [])

        # Provided so that developers can update the chatbot value from other events outside of `gr.ChatInterface`.
        self.chatbot_value = State(self.chatbot.value if self.chatbot.value else [])
388
+
389
    def _render_footer(self):
        """Render below-chatbot UI: the Examples widget (when examples carry
        additional-input values) and an accordion for unrendered additional
        inputs."""
        if self.examples:
            self.examples_handler = Examples(
                examples=self.examples,
                inputs=[self.textbox] + self.additional_inputs,
                outputs=self.chatbot,
                # Generators stream example responses; plain functions run once.
                fn=self._examples_stream_fn if self.is_generator else self._examples_fn,
                cache_examples=self.cache_examples,
                cache_mode=cast(Literal["eager", "lazy"], self.cache_mode),
                # Only shown/preprocessed when examples include additional-input
                # values; otherwise examples live inside the chatbot itself.
                visible=self._additional_inputs_in_examples,
                preprocess=self._additional_inputs_in_examples,
            )

        any_unrendered_inputs = any(
            not inp.is_rendered for inp in self.additional_inputs
        )
        # Components already rendered elsewhere in the surrounding Blocks are
        # left in place; only unrendered ones go into the accordion.
        if self.additional_inputs and any_unrendered_inputs:
            with Accordion(**self.additional_inputs_accordion_params):  # type: ignore
                for input_component in self.additional_inputs:
                    if not input_component.is_rendered:
                        input_component.render()
410
+
411
+ def _setup_example_messages(
412
+ self,
413
+ examples: list[str] | list[MultimodalValue] | list[list] | None,
414
+ example_labels: list[str] | None = None,
415
+ example_icons: list[str] | None = None,
416
+ ) -> list[ExampleMessage]:
417
+ examples_messages = []
418
+ if examples:
419
+ for index, example in enumerate(examples):
420
+ if isinstance(example, list):
421
+ example = example[0]
422
+ example_message: ExampleMessage = {}
423
+ if isinstance(example, str):
424
+ example_message["text"] = example
425
+ elif isinstance(example, dict):
426
+ example_message["text"] = example.get("text", "")
427
+ example_message["files"] = example.get("files", [])
428
+ if example_labels:
429
+ example_message["display_text"] = example_labels[index]
430
+ if self.multimodal:
431
+ example_files = example_message.get("files")
432
+ if not example_files:
433
+ if example_icons:
434
+ example_message["icon"] = example_icons[index]
435
+ else:
436
+ example_message["icon"] = {
437
+ "path": "",
438
+ "url": None,
439
+ "orig_name": None,
440
+ "mime_type": "text", # for internal use, not a valid mime type
441
+ "meta": {"_type": "gradio.FileData"},
442
+ }
443
+ elif example_icons:
444
+ example_message["icon"] = example_icons[index]
445
+ examples_messages.append(example_message)
446
+ return examples_messages
447
+
448
+ def _generate_chat_title(self, conversation: list[MessageDict]) -> str:
449
+ """
450
+ Generate a title for a conversation by taking the first user message that is a string
451
+ and truncating it to 40 characters. If files are present, add a 📎 to the title.
452
+ """
453
+ title = ""
454
+ for message in conversation:
455
+ if message["role"] == "user":
456
+ if isinstance(message["content"], str):
457
+ title += message["content"]
458
+ break
459
+ else:
460
+ title += "📎 "
461
+ if len(title) > 40:
462
+ title = title[:40] + "..."
463
+ return title or "Conversation"
464
+
465
+ def _save_conversation(
466
+ self,
467
+ index: int | None,
468
+ conversation: list[MessageDict],
469
+ saved_conversations: list[list[MessageDict]],
470
+ ):
471
+ if self.save_history:
472
+ if index is not None:
473
+ saved_conversations[index] = conversation
474
+ else:
475
+ saved_conversations.append(conversation)
476
+ index = len(saved_conversations) - 1
477
+ return index, saved_conversations
478
+
479
+ def _delete_conversation(
480
+ self,
481
+ index: int | None,
482
+ saved_conversations: list[list[MessageDict]],
483
+ ):
484
+ if index is not None:
485
+ saved_conversations.pop(index)
486
+ return None, saved_conversations
487
+
488
def _load_chat_history(self, conversations):
    """Build the sidebar Dataset listing one generated title per non-empty
    saved conversation."""
    titles = [
        [self._generate_chat_title(conv)]
        for conv in conversations or []
        if conv
    ]
    return Dataset(samples=titles)
def _load_conversation(
    self,
    index: int,
    conversations: list[list[MessageDict]],
):
    """Display the saved conversation at `index` in the chatbot, clearing
    any feedback values."""
    restored = Chatbot(
        value=conversations[index],  # type: ignore
        feedback_value=[],
    )
    return index, restored
def _setup_events(self) -> None:
    """Wire every UI event — submit, retry, undo, edit, option/example
    selection, history persistence, and flagging — to its handler.
    Called once at construction; ordering of the chained `.then()` calls
    is significant (UI updates before state synchronization/saving)."""
    from gradio import on

    submit_triggers = [self.textbox.submit, self.chatbot.retry]
    submit_fn = self._stream_fn if self.is_generator else self._submit_fn

    # Mirrors the displayed chatbot value into both State copies.
    synchronize_chat_state_kwargs = {
        "fn": lambda x: (x, x),
        "inputs": [self.chatbot],
        "outputs": [self.chatbot_state, self.chatbot_value],
        "show_api": False,
        "queue": False,
    }
    # Shared kwargs for every invocation of the user's chat function.
    submit_fn_kwargs = {
        "fn": submit_fn,
        "inputs": [self.saved_input, self.chatbot_state] + self.additional_inputs,
        "outputs": [self.null_component, self.chatbot] + self.additional_outputs,
        "show_api": False,
        "concurrency_limit": cast(
            Union[int, Literal["default"], None], self.concurrency_limit
        ),
        "show_progress": cast(
            Literal["full", "minimal", "hidden"], self.show_progress
        ),
    }
    # Shared kwargs for persisting the current conversation.
    save_fn_kwargs = {
        "fn": self._save_conversation,
        "inputs": [
            self.conversation_id,
            self.chatbot_state,
            self.saved_conversations,
        ],
        "outputs": [self.conversation_id, self.saved_conversations],
        "show_api": False,
        "queue": False,
    }

    submit_event = (
        self.textbox.submit(
            self._clear_and_save_textbox,
            [self.textbox],
            [self.textbox, self.saved_input],
            show_api=False,
            queue=False,
        )
        .then(  # The reason we do this outside of the submit_fn is that we want to update the chatbot UI with the user message immediately, before the submit_fn is called
            self._append_message_to_history,
            [self.saved_input, self.chatbot],
            [self.chatbot],
            show_api=False,
            queue=False,
        )
        .then(**submit_fn_kwargs)
    )
    submit_event.then(**synchronize_chat_state_kwargs).then(
        lambda: update(value=None, interactive=True),
        None,
        self.textbox,
        show_api=False,
    ).then(**save_fn_kwargs)

    # Creates the "/chat" API endpoint
    self.fake_api_btn.click(
        submit_fn,
        [self.textbox, self.chatbot_state] + self.additional_inputs,
        [self.api_response, self.chatbot_state] + self.additional_outputs,
        api_name=cast(Union[str, Literal[False]], self.api_name),
        concurrency_limit=cast(
            Union[int, Literal["default"], None], self.concurrency_limit
        ),
        postprocess=False,
    )

    if (
        isinstance(self.chatbot, Chatbot)
        and self.examples
        and not self._additional_inputs_in_examples
    ):
        if self.cache_examples or self.run_examples_on_click:
            example_select_event = self.chatbot.example_select(
                self.example_clicked,
                None,
                [self.chatbot, self.saved_input],
                show_api=False,
            )
            # Without cached examples the chat fn must actually run.
            if not self.cache_examples:
                example_select_event = example_select_event.then(**submit_fn_kwargs)
            example_select_event.then(**synchronize_chat_state_kwargs)
        else:
            # Only populate the textbox; the user still presses submit.
            self.chatbot.example_select(
                self.example_populated,
                None,
                [self.textbox],
                show_api=False,
            )

    retry_event = (
        self.chatbot.retry(
            self._pop_last_user_message,
            [self.chatbot_state],
            [self.chatbot_state, self.saved_input],
            show_api=False,
            queue=False,
        )
        .then(
            self._append_message_to_history,
            [self.saved_input, self.chatbot_state],
            [self.chatbot],
            show_api=False,
            queue=False,
        )
        .then(
            # Lock the textbox while the retried response is generated.
            lambda: update(interactive=False, placeholder=""),
            outputs=[self.textbox],
            show_api=False,
        )
        .then(**submit_fn_kwargs)
    )
    retry_event.then(**synchronize_chat_state_kwargs).then(
        lambda: update(interactive=True),
        outputs=[self.textbox],
        show_api=False,
    ).then(**save_fn_kwargs)

    self._setup_stop_events(submit_triggers, [submit_event, retry_event])

    self.chatbot.undo(
        self._pop_last_user_message,
        [self.chatbot],
        [self.chatbot, self.textbox],
        show_api=False,
        queue=False,
    ).then(**synchronize_chat_state_kwargs).then(**save_fn_kwargs)

    self.chatbot.option_select(
        self.option_clicked,
        [self.chatbot],
        [self.chatbot, self.saved_input],
        show_api=False,
    ).then(**submit_fn_kwargs).then(**synchronize_chat_state_kwargs).then(
        **save_fn_kwargs
    )

    self.chatbot.clear(**synchronize_chat_state_kwargs).then(
        self._delete_conversation,
        [self.conversation_id, self.saved_conversations],
        [self.conversation_id, self.saved_conversations],
        show_api=False,
        queue=False,
    )

    if self.editable:
        self.chatbot.edit(
            self._edit_message,
            [self.chatbot],
            [self.chatbot, self.chatbot_state, self.saved_input],
            show_api=False,
        ).success(**submit_fn_kwargs).success(**synchronize_chat_state_kwargs).then(
            **save_fn_kwargs
        )

    if self.save_history:
        self.new_chat_button.click(
            lambda: (None, []),
            None,
            [self.conversation_id, self.chatbot],
            show_api=False,
            queue=False,
        ).then(
            lambda x: x,
            [self.chatbot],
            [self.chatbot_state],
            show_api=False,
            queue=False,
        )

        # Refresh the history sidebar on load and whenever saves change.
        on(
            triggers=[self.load, self.saved_conversations.change],
            fn=self._load_chat_history,
            inputs=[self.saved_conversations],
            outputs=[self.chat_history_dataset],
            show_api=False,
            queue=False,
        )

        self.chat_history_dataset.click(
            lambda: [],
            None,
            [self.chatbot],
            show_api=False,
            queue=False,
            show_progress="hidden",
        ).then(
            self._load_conversation,
            [self.chat_history_dataset, self.saved_conversations],
            [self.conversation_id, self.chatbot],
            show_api=False,
            queue=False,
            show_progress="hidden",
        ).then(**synchronize_chat_state_kwargs)

    if self.flagging_mode != "never":
        flagging_callback = ChatCSVLogger()
        flagging_callback.setup(self.flagging_dir)
        self.chatbot.feedback_options = self.flagging_options
        self.chatbot.like(flagging_callback.flag, self.chatbot)

    self.chatbot_value.change(
        lambda x: x,
        [self.chatbot_value],
        [self.chatbot],
        show_api=False,
    ).then(**synchronize_chat_state_kwargs)
def _setup_stop_events(
    self, event_triggers: list[Callable], events_to_cancel: list[Dependency]
) -> None:
    """Swap the submit button for a stop button while a generator response
    is streaming, and restore it (cancelling the stream) on stop/finish.
    Only relevant when the chat function is a generator."""
    textbox_component = MultimodalTextbox if self.multimodal else Textbox
    if self.is_generator:
        # Captured before any event fires; the lambdas below close over it.
        original_submit_btn = self.textbox.submit_btn
        for event_trigger in event_triggers:
            # On submit/retry: hide submit, show the configured stop button.
            event_trigger(
                utils.async_lambda(
                    lambda: textbox_component(
                        submit_btn=False,
                        stop_btn=self.original_stop_btn,
                    )
                ),
                None,
                [self.textbox],
                show_api=False,
                queue=False,
            )
        for event_to_cancel in events_to_cancel:
            # When a generation chain completes: restore the submit button.
            event_to_cancel.then(
                utils.async_lambda(
                    lambda: textbox_component(
                        submit_btn=original_submit_btn, stop_btn=False
                    )
                ),
                None,
                [self.textbox],
                show_api=False,
                queue=False,
            )
        # Pressing stop cancels the in-flight generation events.
        self.textbox.stop(
            None,
            None,
            None,
            cancels=events_to_cancel,  # type: ignore
            show_api=False,
        )
def _clear_and_save_textbox(
    self,
    message: str | MultimodalPostprocess,
) -> tuple[
    Textbox | MultimodalTextbox,
    str | MultimodalPostprocess,
]:
    """Blank and disable the textbox while stashing the submitted message
    into saved_input for the downstream submit chain."""
    cleared_box = type(self.textbox)("", interactive=False, placeholder="")
    return cleared_box, message
+ @staticmethod
776
+ def _messages_to_tuples(history_messages: list[MessageDict]) -> TupleFormat:
777
+ history_tuples = []
778
+ for message in history_messages:
779
+ if message["role"] == "user":
780
+ history_tuples.append((message["content"], None))
781
+ elif history_tuples and history_tuples[-1][1] is None:
782
+ history_tuples[-1] = (history_tuples[-1][0], message["content"])
783
+ else:
784
+ history_tuples.append((None, message["content"]))
785
+ return history_tuples
786
+
787
+ @staticmethod
788
+ def _tuples_to_messages(history_tuples: TupleFormat) -> list[MessageDict]:
789
+ history_messages = []
790
+ for message_tuple in history_tuples:
791
+ if message_tuple[0]:
792
+ history_messages.append({"role": "user", "content": message_tuple[0]})
793
+ if message_tuple[1]:
794
+ history_messages.append(
795
+ {"role": "assistant", "content": message_tuple[1]}
796
+ )
797
+ return history_messages
798
+
799
def _append_message_to_history(
    self,
    message: MessageDict | Message | str | Component | MultimodalPostprocess | list,
    history: list[MessageDict] | TupleFormat,
    role: Literal["user", "assistant"] = "user",
) -> list[MessageDict] | TupleFormat:
    """Append `message` (in any supported shape) to a copy of `history`,
    preserving the configured history format ("messages" or "tuples")."""
    new_entries = self._message_as_message_dict(message, role)
    uses_tuples = self.type == "tuples"
    # Converting to messages format produces a fresh list; otherwise deep-copy
    # so the caller's history is never mutated.
    working = (
        self._tuples_to_messages(history)  # type: ignore
        if uses_tuples
        else copy.deepcopy(history)
    )
    working.extend(new_entries)  # type: ignore
    if uses_tuples:
        return self._messages_to_tuples(working)  # type: ignore
    return working
def _message_as_message_dict(
    self,
    message: MessageDict | Message | str | Component | MultimodalPostprocess | list,
    role: Literal["user", "assistant"],
) -> list[MessageDict]:
    """
    Converts a user message, example message, or response from the chat
    function into a list of MessageDict objects that can be appended to the
    chat history.
    """
    result: list[MessageDict] = []
    items = message if isinstance(message, list) else [message]
    for item in items:
        if isinstance(item, Message):
            result.append(item.model_dump())
        elif isinstance(item, ChatMessage):
            item.role = role
            result.append(
                dataclasses.asdict(item, dict_factory=utils.dict_factory)
            )
        elif isinstance(item, (str, Component)):
            result.append({"role": role, "content": item})
        elif isinstance(item, dict) and "content" in item:
            # Already in MessageDict format; just stamp the role.
            item["role"] = role
            result.append(item)
        else:
            # MultimodalPostprocess format: one entry per file, then the text.
            for file_entry in item.get("files", []):
                if isinstance(file_entry, dict):
                    file_entry = file_entry.get("path")
                result.append({"role": role, "content": (file_entry,)})
            if isinstance(item["text"], str):
                result.append({"role": role, "content": item["text"]})
    return result
async def _submit_fn(
    self,
    message: str | MultimodalPostprocess,
    history: TupleFormat | list[MessageDict],
    request: Request,
    *args,
) -> tuple:
    """Run the (non-generator) chat function once and return the raw
    response plus the updated history (and any additional outputs)."""
    # Inject special params (e.g. the Request) expected by the user's fn.
    inputs, _, _ = special_args(
        self.fn, inputs=[message, history, *args], request=request
    )
    if self.is_async:
        response = await self.fn(*inputs)
    else:
        # Run the sync chat function off the event loop.
        response = await anyio.to_thread.run_sync(
            self.fn, *inputs, limiter=self.limiter
        )
    if self.additional_outputs:
        response, *additional_outputs = response
    else:
        additional_outputs = None
    history = self._append_message_to_history(message, history, "user")
    history = self._append_message_to_history(response, history, "assistant")
    if additional_outputs:
        return response, history, *additional_outputs
    return response, history
async def _stream_fn(
    self,
    message: str | MultimodalPostprocess,
    history: TupleFormat | list[MessageDict],
    request: Request,
    *args,
) -> AsyncGenerator[
    tuple,
    None,
]:
    """Run the generator chat function, yielding (response, history[, extras])
    after each chunk so the chatbot updates incrementally."""
    inputs, _, _ = special_args(
        self.fn, inputs=[message, history, *args], request=request
    )
    if self.is_async:
        generator = self.fn(*inputs)
    else:
        generator = await anyio.to_thread.run_sync(
            self.fn, *inputs, limiter=self.limiter
        )
        # Adapt the sync generator for `async for` iteration.
        generator = utils.SyncToAsyncIterator(generator, self.limiter)

    history = self._append_message_to_history(message, history, "user")
    additional_outputs = None
    try:
        # The first chunk is pulled eagerly so an empty generator can be
        # detected and handled.
        first_response = await utils.async_iteration(generator)
        if self.additional_outputs:
            first_response, *additional_outputs = first_response
        history_ = self._append_message_to_history(
            first_response, history, "assistant"
        )
        if not additional_outputs:
            yield first_response, history_
        else:
            yield first_response, history_, *additional_outputs
    except StopIteration:
        # NOTE(review): an exhausted async generator normally raises
        # StopAsyncIteration — confirm utils.async_iteration converts it to
        # StopIteration, otherwise this branch may be unreachable.
        yield None, history
    async for response in generator:
        if self.additional_outputs:
            response, *additional_outputs = response
        history_ = self._append_message_to_history(response, history, "assistant")
        if not additional_outputs:
            yield response, history_
        else:
            yield response, history_, *additional_outputs
def option_clicked(
    self, history: list[MessageDict], option: SelectData
) -> tuple[TupleFormat | list[MessageDict], str | MultimodalPostprocess]:
    """
    When an option is clicked, the chat history is appended with the option
    value, which also becomes the saved input. Only reachable when self.type
    is "messages", since options exist only for that chatbot type.
    """
    chosen = option.value
    history.append({"role": "user", "content": chosen})
    return history, chosen
+ def _flatten_example_files(self, example: SelectData):
937
+ """
938
+ Returns an example with the files flattened to just the file path.
939
+ Also ensures that the `files` key is always present in the example.
940
+ """
941
+ example.value["files"] = [f["path"] for f in example.value.get("files", [])]
942
+ return example
943
+
944
def example_populated(self, example: SelectData):
    """Return the value to place in the textbox for a selected example:
    the full multimodal dict, or just its text when not multimodal."""
    if not self.multimodal:
        return example.value["text"]
    return self._flatten_example_files(example).value
+ def _edit_message(
952
+ self, history: list[MessageDict] | TupleFormat, edit_data: EditData
953
+ ) -> tuple[
954
+ list[MessageDict] | TupleFormat,
955
+ list[MessageDict] | TupleFormat,
956
+ str | MultimodalPostprocess,
957
+ ]:
958
+ if isinstance(edit_data.index, (list, tuple)):
959
+ history = history[: edit_data.index[0]]
960
+ else:
961
+ history = history[: edit_data.index]
962
+ return history, history, edit_data.value
963
+
964
def example_clicked(
    self, example: SelectData
) -> Generator[
    tuple[TupleFormat | list[MessageDict], str | MultimodalPostprocess], None, None
]:
    """
    On example click, first yield a history containing only the example
    message; then, if example caching is enabled, yield the cached response
    history as well.
    """
    history = self._append_message_to_history(example.value, [], "user")
    example = self._flatten_example_files(example)
    if self.multimodal:
        message = example.value
    else:
        message = example.value["text"]
    yield history, message
    if self.cache_examples:
        cached_history = self.examples_handler.load_from_cache(example.index)[0].root
        yield cached_history, message
def _process_example(
    self, message: ExampleMessage | str, response: MessageDict | str | None
):
    """Expand an example message and its (possibly cached) response into
    history entries in the configured format ("tuples" or "messages")."""
    if not self.multimodal:
        # Plain-string example.
        if self.type == "tuples":
            return [[message, response]]
        return [
            {"role": "user", "content": message},
            {"role": "assistant", "content": response},
        ]
    # ExampleMessage dict: files first, then the text, then the response.
    result = []
    if self.type == "tuples":
        for file in message.get("files", []):
            result.append([file, None])
        if "text" in message:
            result.append([message["text"], None])
        result[-1][1] = response
    else:
        for file in message.get("files", []):
            if isinstance(file, dict):
                file = file.get("path")
            result.append({"role": "user", "content": (file,)})
        if "text" in message:
            result.append({"role": "user", "content": message["text"]})
        result.append({"role": "assistant", "content": response})
    return result
async def _examples_fn(
    self, message: ExampleMessage | str, *args
) -> TupleFormat | list[MessageDict]:
    """Run the (non-generator) chat function for one example with an empty
    history — used to populate the example cache."""
    # No Request object is available when caching examples.
    inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)
    if self.is_async:
        response = await self.fn(*inputs)
    else:
        # Run the sync chat function off the event loop.
        response = await anyio.to_thread.run_sync(
            self.fn, *inputs, limiter=self.limiter
        )
    return self._process_example(message, response)  # type: ignore
async def _examples_stream_fn(
    self,
    message: str,
    *args,
) -> AsyncGenerator:
    """Stream the generator chat function for one example with an empty
    history — used for (lazy) example caching."""
    # No Request object is available when caching examples.
    inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)

    if self.is_async:
        generator = self.fn(*inputs)
    else:
        generator = await anyio.to_thread.run_sync(
            self.fn, *inputs, limiter=self.limiter
        )
        # Adapt the sync generator for `async for` iteration.
        generator = utils.SyncToAsyncIterator(generator, self.limiter)
    async for response in generator:
        yield self._process_example(message, response)
+ def _pop_last_user_message(
1043
+ self,
1044
+ history: list[MessageDict] | TupleFormat,
1045
+ ) -> tuple[list[MessageDict] | TupleFormat, str | MultimodalPostprocess]:
1046
+ """
1047
+ Removes the message (or set of messages) that the user last sent from the chat history and returns them.
1048
+ If self.multimodal is True, returns a MultimodalPostprocess (dict) object with text and files.
1049
+ If self.multimodal is False, returns just the message text as a string.
1050
+ """
1051
+ if not history:
1052
+ return history, "" if not self.multimodal else {"text": "", "files": []}
1053
+
1054
+ if self.type == "tuples":
1055
+ history = self._tuples_to_messages(history) # type: ignore
1056
+ i = len(history) - 1
1057
+ while i >= 0 and history[i]["role"] == "assistant": # type: ignore
1058
+ i -= 1
1059
+ while i >= 0 and history[i]["role"] == "user": # type: ignore
1060
+ i -= 1
1061
+ last_messages = history[i + 1 :]
1062
+ last_user_message = ""
1063
+ files = []
1064
+ for msg in last_messages:
1065
+ assert isinstance(msg, dict) # noqa: S101
1066
+ if msg["role"] == "user":
1067
+ content = msg["content"]
1068
+ if isinstance(content, tuple):
1069
+ files.append(content[0])
1070
+ else:
1071
+ last_user_message = content
1072
+ return_message = (
1073
+ {"text": last_user_message, "files": files}
1074
+ if self.multimodal
1075
+ else last_user_message
1076
+ )
1077
+ history_ = history[: i + 1]
1078
+ if self.type == "tuples":
1079
+ history_ = self._messages_to_tuples(history_) # type: ignore
1080
+ return history_, return_message # type: ignore
1081
+
1082
def render(self) -> ChatInterface:
    """Render the interface into the active Blocks context and return self."""
    # If this is being rendered inside another Blocks, and the height is not explicitly set, set it to 400 instead of 200.
    if get_blocks_context() and not self.provided_chatbot:
        self.chatbot.height = 400
    super().render()
    return self
gradio_app.py ADDED
@@ -0,0 +1,82 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from collections.abc import Generator
3
+ from openai import OpenAI
4
+ # from gradio.chat_interface import ChatInterface
5
+ from chat_interface import ChatInterface
6
+
7
# --- Space configuration ---
USERNAME = "ahmedheakl"  # HF username; used to build the public file URL below
SPACE_NAME = "arabic-vlm-app"  # HF Space name; must match the deployed Space
TITLE = "AIN Arabic VLM"
DESCRIPTION = "Welcome to the AIN Arabic VLM chatbot. The best Arabic-English VLM developed by MBZUAI."
TOP_N_HISTORY = 2  # max history entries forwarded to the model backend
LOGO_PATH = "https://huggingface.co/spaces/ahmedheakl/arabic-vlm-app/resolve/main/logo.jpeg"
13
+
14
+
15
+
16
def load_chat(
    base_url: str,
    model: str,
    token: str | None = None,
    *,
    system_message: str | None = None,
    **kwargs,
) -> gr.ChatInterface:
    """Build a multimodal ChatInterface backed by an OpenAI-compatible server.

    Args:
        base_url: Base URL of the OpenAI-compatible endpoint (e.g. an ngrok tunnel).
        model: Model name passed to the completions API.
        token: API key; may be a placeholder such as "ollama" for local servers.
        system_message: Optional system prompt used when the history is empty.
        **kwargs: Forwarded to ChatInterface (title, examples, multimodal, ...).

    Returns:
        The configured (not yet launched) ChatInterface.
    """
    client = OpenAI(api_key=token, base_url=base_url)
    start_message = (
        [{"role": "system", "content": system_message}] if system_message else []
    )

    def open_api_stream(
        # Fixed annotation: with a MultimodalTextbox the submitted message is a
        # dict of the form {"text": str, "files": [paths]}, not a plain str.
        message: dict,
        history: list | None,
    ) -> Generator[str, None, None]:
        """Stream the assistant reply, yielding the accumulated text so far."""
        history = history or start_message
        if len(history) > 0 and isinstance(history[0], (list, tuple)):
            # Tuple-format history: bound its size, then convert to messages.
            # NOTE(review): `[:TOP_N_HISTORY]` keeps the OLDEST entries; if the
            # intent is to bound context with the most recent turns, it should
            # be `history[-TOP_N_HISTORY:]` — confirm before changing behavior.
            history = history[:TOP_N_HISTORY]
            history = ChatInterface._tuples_to_messages(history)
        files = message.get('files', [])
        content = [
            {"type": "text", "text": message.get('text', '')}
        ]
        if files:
            # Expose the uploaded file through the Space's public file route so
            # the remote VLM backend can fetch it by URL.
            src_path = files[0]
            image_url = f"https://{USERNAME}-{SPACE_NAME}.hf.space/gradio_api/file={src_path}"
            content.append({"type": "image_url", "image_url": {"url": image_url}})
        stream = client.chat.completions.create(
            model=model,
            messages=history + [{"role": "user", "content": content}],
            stream=True,
        )
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
            yield response

    return ChatInterface(
        open_api_stream, type="messages", **kwargs
    )
58
+
59
# Build and launch the Space UI against the tunnelled backend.
# NOTE(review): the ngrok URL is ephemeral and must be updated whenever the
# tunnel restarts — consider reading it from an environment variable.
load_chat(
    "https://0f21-5-195-0-150.ngrok-free.app/v1",
    model="test",
    token="ollama",
    multimodal=True,
    title=TITLE,
    description=DESCRIPTION,
    theme="ocean",
    examples=[
        {
            "text": "أخبرني ما اسم المبنى الموجود في الصورة والشيء المثير للاهتمام فيه",
            "files": ["https://cdn.mos.cms.futurecdn.net/5HrnHp9ybAqYrtruKAsfkN-1200-80.jpg"],
        },
        {
            "text": "ما هو العلم الموجود في الصورة؟",
            "files": ["https://mtc.ae/wp-content/uploads/2023/09/Satin-UAE-Flag-UAE-F-B-Blank.jpg"],
        },
        {
            "text": "How many people are there in the image?",
            "files": ["https://i0.wp.com/eatpitapita.com/wp-content/uploads/2020/02/Arab-Muslim-or-Middle-Eastern-Preview.jpg"]
        },
    ],
    logo=LOGO_PATH,
).queue().launch(allowed_paths=["/static"])
logo.jpeg ADDED
old_app.py ADDED
@@ -0,0 +1,97 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from collections.abc import Generator
3
+ from openai import OpenAI
4
+ from gradio.chat_interface import ChatInterface
5
+ from pathlib import Path
6
+ import shutil
7
+ import os
8
+ from fastapi import FastAPI
9
+ from fastapi.staticfiles import StaticFiles
10
+
11
+ USERNAME = "ahmedheakl"
12
+ SPACE_NAME = "arabic-vlm-app"
13
+ TITLE = "AIN Arabic VLM"
14
+ DESCRIPTION = "Welcome to the AIN Arabic VLM chatbot. The best Arabic-English VLM developed by MBZUAI."
15
+ PUBLIC_DIR = Path("static")
16
+ TOP_N_HISTORY = 2
17
+ LOGO_PATH = "./logo.jpeg"
18
+ os.makedirs(PUBLIC_DIR, exist_ok=True)
19
+
20
+ app = FastAPI()
21
+ app.mount("/static", StaticFiles(directory=PUBLIC_DIR), name="static")
22
+
23
+
24
+ # move the logo to public directory
25
+ shutil.copy(LOGO_PATH, PUBLIC_DIR / Path(LOGO_PATH).name)
26
+ logo_path = f"/static/{Path(LOGO_PATH).name}"
27
+
28
def load_chat(
    base_url: str,
    model: str,
    token: str | None = None,
    *,
    system_message: str | None = None,
    **kwargs,
) -> gr.ChatInterface:
    """Build a multimodal ChatInterface backed by an OpenAI-compatible server.

    Legacy variant: copies the uploaded image into PUBLIC_DIR and serves it
    via the /static mount (the newer gradio_app.py uses gradio's own file
    route instead).
    """
    client = OpenAI(api_key=token, base_url=base_url)
    start_message = (
        [{"role": "system", "content": system_message}] if system_message else []
    )

    def open_api_stream(
        # NOTE(review): annotated as str, but with a MultimodalTextbox the
        # submitted message is a dict {"text": ..., "files": [...]}.
        message: str, history: list | None
    ) -> Generator[str, None, None]:
        """Stream the assistant reply, yielding the accumulated text so far."""
        history = history or start_message
        if len(history) > 0 and isinstance(history[0], (list, tuple)):
            # NOTE(review): keeps the OLDEST TOP_N_HISTORY entries — confirm
            # whether the most recent turns were intended instead.
            history = history[:TOP_N_HISTORY]
            history = ChatInterface._tuples_to_messages(history)
        files = message.get('files', [])
        content = [
            {"type": "text", "text": message.get('text', '')}
        ]
        if files:
            # Move the uploaded temp file into the public static dir so the
            # remote backend can fetch it by URL.
            # NOTE(review): shutil.move removes gradio's temp copy; a re-submit
            # of the same message would then fail — consider copying instead.
            src_path = Path(files[0])
            dest_path = PUBLIC_DIR / src_path.name
            shutil.move(src_path, dest_path)
            image_url = f"https://{USERNAME}-{SPACE_NAME}.hf.space/static/{src_path.name}"
            content.append({"type": "image_url", "image_url": {"url": image_url}})
        stream = client.chat.completions.create(
            model=model,
            messages=history + [{"role": "user", "content": content}],
            stream=True,
        )
        response = ""
        for chunk in stream:
            if chunk.choices[0].delta.content is not None:
                response += chunk.choices[0].delta.content
            yield response

    return ChatInterface(
        open_api_stream, type="messages", **kwargs
    )
72
# Wrap the chat UI in a Blocks layout so extra markup/CSS can be added.
with gr.Blocks(theme=gr.themes.Soft()) as gradio_interface:
    # Add CSS for better styling
    gr.Markdown(
        """
        <style>
        .container { margin: 0 auto; max-width: 1200px; padding: 20px; }
        .header { text-align: center; margin-bottom: 40px; }
        </style>
        """
    )
    # chatbot = gr.Chatbot()
    # textbox = gr.MultimodalTextbox(file_count="single", file_types=["image"], sources=["upload"])

    # NOTE(review): calling .launch() while still inside the Blocks context,
    # and then also mounting `gradio_interface` on the FastAPI app below,
    # looks contradictory — launch() starts its own server. Confirm which
    # entry point is intended (this file appears superseded by gradio_app.py).
    load_chat(
        "https://0f21-5-195-0-150.ngrok-free.app/v1",
        model="test",
        token="ollama",
        multimodal=True,
        # chatbot=chatbot,
        # textbox=textbox,
    ).launch()
app = gr.mount_gradio_app(app, gradio_interface, path="/")

if __name__ == "__main__":
    import uvicorn
    uvicorn.run(app, host="0.0.0.0", port=7860)
requirements.txt ADDED
@@ -0,0 +1,4 @@
 
 
 
 
 
1
+ openai==1.60.0
2
+ requests==2.32.3
3
+ gradio==5.13.0
4
+ gradio_client==1.6.0
run_app.sh ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
#!/bin/bash
# Activate the project's conda environment and serve the app with gunicorn.
source /apps/local/anaconda3/conda_init.sh
conda activate .ara-app

# NOTE(review): this serves `app` from app.py, but the repo contains
# old_app.py / gradio_app.py — confirm the module name is correct.
gunicorn -b 0.0.0.0:5000 app:app

# SECURITY: a Telegram bot token was previously committed here in plain text.
# It has been redacted — revoke the old token via @BotFather and supply
# secrets through environment variables, never in the repository.
#
# curl -X POST "https://api.telegram.org/bot${TELEGRAM_BOT_TOKEN}/setWebhook" \
#   -d "url=https://<your-public-host>/${TELEGRAM_BOT_TOKEN}"
setup_amazon.sh ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ sudo apt update
2
+ sudo apt install python3-pip -y
3
+ sudo apt install python3-venv -y
4
+ python3 -m venv venv
5
+ . venv/bin/activate
6
+ sudo apt install nginx -y
7
+ sudo systemctl start nginx
8
+ sudo systemctl enable nginx