Adding more docs for using components in chatbot (#8593)
* adding more docs for components in chatbot
* add changeset
* add changeset
* add simple demo
* simple demo
* notebook

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
parent 7fc7455553 · commit d35c290aad
6  .changeset/evil-heads-take.md  Normal file
@@ -0,0 +1,6 @@
---
"gradio": patch
"website": patch
---

feat:Adding more docs for using components in chatbot
BIN  demo/chatbot_core_components_simple/files/audio.wav  Normal file
Binary file not shown.
BIN  demo/chatbot_core_components_simple/files/avatar.png  Normal file
Binary file not shown. (Size: 5.2 KiB)
1  demo/chatbot_core_components_simple/files/sample.txt  Normal file
@@ -0,0 +1 @@
hello friends
BIN  demo/chatbot_core_components_simple/files/world.mp4  Normal file
Binary file not shown.
1  demo/chatbot_core_components_simple/run.ipynb  Normal file
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_core_components_simple"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/audio.wav https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components_simple/files/audio.wav\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components_simple/files/avatar.png\n", "!wget -q -O files/sample.txt https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components_simple/files/sample.txt\n", "!wget -q -O files/world.mp4 https://github.com/gradio-app/gradio/raw/main/demo/chatbot_core_components_simple/files/world.mp4"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import random\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "color_map = {\n", " \"harmful\": \"crimson\",\n", " \"neutral\": \"gray\",\n", " \"beneficial\": \"green\",\n", "}\n", "\n", "def html_src(harm_level):\n", " return f\"\"\"\n", "<div style=\"display: flex; gap: 5px;padding: 2px 4px;margin-top: -40px\">\n", " <div style=\"background-color: {color_map[harm_level]}; padding: 2px; border-radius: 5px;\">\n", " {harm_level}\n", " </div>\n", "</div>\n", "\"\"\"\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history, response_type):\n", " if response_type == \"gallery\":\n", " history[-1][1] = gr.Gallery(\n", " [\"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png\",\n", " \"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png\"]\n", " )\n", " elif response_type == \"image\":\n", " history[-1][1] = gr.Image(\"https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png\")\n", " elif response_type == \"video\":\n", " history[-1][1] = gr.Video(\"https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4\")\n", " elif response_type == \"audio\":\n", " history[-1][1] = gr.Audio(\"https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav\")\n", " elif response_type == \"html\":\n", " history[-1][1] = gr.HTML(\n", " html_src(random.choice([\"harmful\", \"neutral\", \"beneficial\"]))\n", " )\n", " else:\n", " history[-1][1] = \"Cool!\"\n", " return history\n", "\n", "\n", "with gr.Blocks(fill_height=True) as demo:\n", " chatbot = gr.Chatbot(\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " scale=1,\n", " )\n", " response_type = gr.Radio(\n", " [\n", " \"image\",\n", " \"text\",\n", " \"gallery\",\n", " \"video\",\n", " \"audio\",\n", " \"html\",\n", " ],\n", " 
value=\"text\",\n", " label=\"Response Type\",\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(\n", " interactive=True,\n", " placeholder=\"Enter message or upload file...\",\n", " show_label=False,\n", " )\n", "\n", " chat_msg = chat_input.submit(\n", " add_message, [chatbot, chat_input], [chatbot, chat_input]\n", " )\n", " bot_msg = chat_msg.then(\n", " bot, [chatbot, response_type], chatbot, api_name=\"bot_response\"\n", " )\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
|
91  demo/chatbot_core_components_simple/run.py  Normal file
@@ -0,0 +1,91 @@
import gradio as gr
import os
import random

# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.


color_map = {
    "harmful": "crimson",
    "neutral": "gray",
    "beneficial": "green",
}

def html_src(harm_level):
    return f"""
<div style="display: flex; gap: 5px;padding: 2px 4px;margin-top: -40px">
    <div style="background-color: {color_map[harm_level]}; padding: 2px; border-radius: 5px;">
    {harm_level}
    </div>
</div>
"""

def print_like_dislike(x: gr.LikeData):
    print(x.index, x.value, x.liked)

def add_message(history, message):
    for x in message["files"]:
        history.append(((x,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history, response_type):
    if response_type == "gallery":
        history[-1][1] = gr.Gallery(
            ["https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png",
            "https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png"]
        )
    elif response_type == "image":
        history[-1][1] = gr.Image("https://raw.githubusercontent.com/gradio-app/gradio/main/test/test_files/bus.png")
    elif response_type == "video":
        history[-1][1] = gr.Video("https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4")
    elif response_type == "audio":
        history[-1][1] = gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
    elif response_type == "html":
        history[-1][1] = gr.HTML(
            html_src(random.choice(["harmful", "neutral", "beneficial"]))
        )
    else:
        history[-1][1] = "Cool!"
    return history


with gr.Blocks(fill_height=True) as demo:
    chatbot = gr.Chatbot(
        elem_id="chatbot",
        bubble_full_width=False,
        scale=1,
    )
    response_type = gr.Radio(
        [
            "image",
            "text",
            "gallery",
            "video",
            "audio",
            "html",
        ],
        value="text",
        label="Response Type",
    )

    chat_input = gr.MultimodalTextbox(
        interactive=True,
        placeholder="Enter message or upload file...",
        show_label=False,
    )

    chat_msg = chat_input.submit(
        add_message, [chatbot, chat_input], [chatbot, chat_input]
    )
    bot_msg = chat_msg.then(
        bot, [chatbot, response_type], chatbot, api_name="bot_response"
    )
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

    chatbot.like(print_like_dislike, None, None)

demo.queue()
if __name__ == "__main__":
    demo.launch()
@@ -62,7 +62,7 @@ class Chatbot(Component):
    Also supports audio/video/image files, which are displayed in the Chatbot, and other kinds of files which are displayed as links. This
    component is usually used as an output component.

-    Demos: chatbot_simple, chatbot_multimodal
+    Demos: chatbot_simple, chatbot_multimodal, chatbot_core_components_simple
    Guides: creating-a-chatbot
    """
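As the docstring above notes, audio, video, and image files are displayed inline in the Chatbot while other file types appear as download links. A minimal sketch of that behaviour with the tuple message format, not part of the diff; the relative paths assume the demo's `files/` directory:

```py
import gradio as gr

with gr.Blocks() as demo:
    gr.Chatbot(
        value=[
            # A (filepath,) tuple in place of a string displays the file itself.
            [("files/audio.wav",), "Audio and video files get an inline player."],
            # Non-media files such as .txt are rendered as download links.
            [("files/sample.txt",), "Other file types are shown as links."],
        ]
    )

demo.launch()
```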
@@ -193,6 +193,26 @@ demo.launch()

If you need to create something even more custom, then it's best to construct the chatbot UI using the low-level `gr.Blocks()` API. We have [a dedicated guide for that here](/guides/creating-a-custom-chatbot-with-blocks).

## Using Gradio Components inside the Chatbot

The `Chatbot` component supports using many of the core Gradio components (such as `gr.Image`, `gr.Plot`, `gr.Audio`, and `gr.HTML`) inside of the chatbot. Simply return one of these components from your function to use it with `gr.ChatInterface`. Here's an example:

```py
import gradio as gr

def fake(message, history):
    if message.strip():
        return gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")
    else:
        return "Please provide the name of an artist"

gr.ChatInterface(
    fake,
    textbox=gr.Textbox(placeholder="Which artist's music do you want to listen to?", scale=7),
    chatbot=gr.Chatbot(placeholder="Play music by any artist!"),
).launch()
```

## Using your chatbot via an API

Once you've built your Gradio chatbot and are hosting it on [Hugging Face Spaces](https://hf.space) or somewhere else, you can query it with a simple API at the `/chat` endpoint. The endpoint just expects the user's message (and potentially additional inputs if you have set any using the `additional_inputs` parameter), and will return the response, internally keeping track of the messages sent so far.
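For reference, querying that `/chat` endpoint from Python might look like the following minimal sketch using `gradio_client`; the Space name `user/my-chatbot` is a placeholder:

```py
from gradio_client import Client

# Connect to a hosted chatbot (placeholder Space name, or a full URL such as "http://localhost:7860").
client = Client("user/my-chatbot")

# Send one user message to the /chat endpoint; the server tracks conversation history internally.
response = client.predict("What is the capital of France?", api_name="/chat")
print(response)
```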
@@ -69,6 +69,29 @@ def predict(···) -> list[list[str | tuple[str] | tuple[str, str] | None] | tu

<ShortcutTable shortcuts={obj.string_shortcuts} />
{/if}

### Examples

**Using Gradio Components Inside `gr.Chatbot`**

The `Chatbot` component supports using many of the core Gradio components (such as `gr.Image`, `gr.Plot`, `gr.Audio`, and `gr.HTML`) inside of the chatbot. Simply include one of these components in your list of tuples. Here's an example:

```py
import gradio as gr

def load():
    return [
        ("Here's an audio", gr.Audio("https://github.com/gradio-app/gradio/raw/main/test/test_files/audio_sample.wav")),
        ("Here's a video", gr.Video("https://github.com/gradio-app/gradio/raw/main/demo/video_component/files/world.mp4"))
    ]

with gr.Blocks() as demo:
    chatbot = gr.Chatbot()
    button = gr.Button("Load audio and video")
    button.click(load, None, chatbot)

demo.launch()
```

{#if obj.demos && obj.demos.length > 0}
<!--- Demos -->
### Demos