mirror of
https://github.com/gradio-app/gradio.git
synced 2025-01-30 11:00:11 +08:00
Add ability add a single message from the bot/user side (#3165)
* chatbot fices * chagnelog * remove starts_with * more fixes * added chatbot multimodal demo * fix height * height * update demo * changelog * format * format * Update gradio/components.py Co-authored-by: Abubakar Abid <abubakar@huggingface.co> * fix * format --------- Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
This commit is contained in:
parent
791418a0a7
commit
9c08bb92c5
@ -35,6 +35,12 @@ By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3157](https://git
|
||||
|
||||
|
||||
## Bug Fixes:
|
||||
- Adds ability to add a single message from the bot or user side. Ex: specify `None` as the second value in the tuple, to add a single message in the chatbot from the "bot" side.
|
||||
|
||||
```python
|
||||
gr.Chatbot([("Hi, I'm DialoGPT. Try asking me a question.", None)])
|
||||
```
|
||||
By [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3165](https://github.com/gradio-app/gradio/pull/3165)
|
||||
* Fixes `gr.utils.delete_none` to only remove props whose values are `None` from the config by [@abidlabs](https://github.com/abidlabs) in [PR 3188](https://github.com/gradio-app/gradio/pull/3188)
|
||||
* Fix bug where embedded demos were not loading files properly by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3177](https://github.com/gradio-app/gradio/pull/3177)
|
||||
* The `change` event is now triggered when users click the 'Clear All' button of the multiselect DropDown component by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3195](https://github.com/gradio-app/gradio/pull/3195)
|
||||
@ -110,7 +116,7 @@ By [@maxaudron](https://github.com/maxaudron) in [PR 3075](https://github.com/gr
|
||||
- Ensure the Video component correctly resets the UI state when a new video source is loaded and reduce choppiness of UI by [@pngwn](https://github.com/pngwn) in [PR 3117](https://github.com/gradio-app/gradio/pull/3117)
|
||||
- Fixes loading private Spaces by [@abidlabs](https://github.com/abidlabs) in [PR 3068](https://github.com/gradio-app/gradio/pull/3068)
|
||||
- Added a warning when attempting to launch an `Interface` via the `%%blocks` jupyter notebook magic command by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3126](https://github.com/gradio-app/gradio/pull/3126)
|
||||
- Fixes bug where interactive output image cannot be set when in edit mode by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3135](https://github.com/gradio-app/gradio/pull/3135)
|
||||
- Fixes bug where interactive output image cannot be set when in edit mode by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3135](https://github.com/gradio-app/gradio/pull/3135)
|
||||
- A share link will automatically be created when running on Sagemaker notebooks so that the front-end is properly displayed by [@abidlabs](https://github.com/abidlabs) in [PR 3137](https://github.com/gradio-app/gradio/pull/3137)
|
||||
- Fixes a few dropdown component issues; hide checkmark next to options as expected, and keyboard hover is visible by [@dawoodkhan82](https://github.com/dawoodkhan82) in [PR 3145](https://github.com/gradio-app/gradio/pull/3145)
|
||||
- Fixed bug where example pagination buttons were not visible in dark mode or displayed under the examples table. By [@freddyaboulton](https://github.com/freddyaboulton) in [PR 3144](https://github.com/gradio-app/gradio/pull/3144)
|
||||
|
@ -1 +1 @@
|
||||
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_demo"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import AutoModelForCausalLM, AutoTokenizer\n", "import torch\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\n", "model = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n", "\n", "def predict(input, history=[]):\n", " # tokenize the new input sentence\n", " new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')\n", "\n", " # append the new user input tokens to the chat history\n", " bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)\n", "\n", " # generate a response \n", " history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()\n", "\n", " # convert the tokens to text, and then split the responses into lines\n", " response = tokenizer.decode(history[0]).split(\"<|endoftext|>\")\n", " response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list\n", " return response, history\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " state = gr.State([])\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(show_label=False, placeholder=\"Enter text and press enter\").style(container=False)\n", " \n", " txt.submit(predict, [txt, state], [chatbot, state])\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
|
||||
{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: chatbot_demo"]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "from transformers import AutoModelForCausalLM, AutoTokenizer\n", "import torch\n", "\n", "tokenizer = AutoTokenizer.from_pretrained(\"microsoft/DialoGPT-medium\")\n", "model = AutoModelForCausalLM.from_pretrained(\"microsoft/DialoGPT-medium\")\n", "\n", "def predict(input, history=[]):\n", " # tokenize the new input sentence\n", " new_user_input_ids = tokenizer.encode(input + tokenizer.eos_token, return_tensors='pt')\n", "\n", " # append the new user input tokens to the chat history\n", " bot_input_ids = torch.cat([torch.LongTensor(history), new_user_input_ids], dim=-1)\n", "\n", " # generate a response \n", " history = model.generate(bot_input_ids, max_length=1000, pad_token_id=tokenizer.eos_token_id).tolist()\n", "\n", " # convert the tokens to text, and then split the responses into lines\n", " response = tokenizer.decode(history[0]).split(\"<|endoftext|>\")\n", " response = [(response[i], response[i+1]) for i in range(0, len(response)-1, 2)] # convert to tuples of list\n", " return response, history\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot()\n", " state = gr.State([])\n", "\n", " with gr.Row():\n", " txt = gr.Textbox(show_label=False, placeholder=\"Enter text and press enter\").style(container=False)\n", "\n", " txt.submit(predict, [txt, state], [chatbot, state])\n", " \n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
|
@ -26,7 +26,7 @@ with gr.Blocks() as demo:
|
||||
|
||||
with gr.Row():
|
||||
txt = gr.Textbox(show_label=False, placeholder="Enter text and press enter").style(container=False)
|
||||
|
||||
|
||||
txt.submit(predict, [txt, state], [chatbot, state])
|
||||
|
||||
if __name__ == "__main__":
|
||||
|
@ -3861,14 +3861,14 @@ class Chatbot(Changeable, IOComponent, JSONSerializable):
|
||||
"""
|
||||
Displays a chatbot output showing both user submitted messages and responses. Supports a subset of Markdown including bold, italics, code, and images.
|
||||
Preprocessing: this component does *not* accept input.
|
||||
Postprocessing: expects a {List[Tuple[str, str]]}, a list of tuples with user inputs and responses as strings of HTML.
|
||||
Postprocessing: expects function to return a {List[Tuple[str | None, str | None]]}, a list of tuples with user inputs and responses as strings of HTML or Nones. Messages that are `None` are not displayed.
|
||||
|
||||
Demos: chatbot_demo
|
||||
Demos: chatbot_demo, chatbot_multimodal
|
||||
"""
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
value: List[Tuple[str, str]] | Callable | None = None,
|
||||
value: List[Tuple[str | None, str | None]] | Callable | None = None,
|
||||
color_map: Dict[str, str] | None = None, # Parameter moved to Chatbot.style()
|
||||
*,
|
||||
label: str | None = None,
|
||||
@ -3930,7 +3930,9 @@ class Chatbot(Changeable, IOComponent, JSONSerializable):
|
||||
}
|
||||
return updated_config
|
||||
|
||||
def postprocess(self, y: List[Tuple[str, str]]) -> List[Tuple[str, str]]:
|
||||
def postprocess(
|
||||
self, y: List[Tuple[str | None, str | None]]
|
||||
) -> List[Tuple[str | None, str | None]]:
|
||||
"""
|
||||
Parameters:
|
||||
y: List of tuples representing the message and response pairs. Each message and response should be a string, which may be in Markdown format.
|
||||
@ -3940,7 +3942,10 @@ class Chatbot(Changeable, IOComponent, JSONSerializable):
|
||||
if y is None:
|
||||
return []
|
||||
for i, (message, response) in enumerate(y):
|
||||
y[i] = (self.md.renderInline(message), self.md.renderInline(response))
|
||||
y[i] = (
|
||||
None if message is None else self.md.renderInline(message),
|
||||
None if response is None else self.md.renderInline(response),
|
||||
)
|
||||
return y
|
||||
|
||||
def style(self, *, color_map: Tuple[str, str] | None = None, **kwargs):
|
||||
|
@ -17,6 +17,7 @@ def copy_all_demos(source_dir: str, dest_dir: str):
|
||||
"blocks_update",
|
||||
"calculator",
|
||||
"cancel_events",
|
||||
"chatbot_multimodal",
|
||||
"fake_gan",
|
||||
"fake_diffusion_with_gif",
|
||||
"gender_sentence_default_interpretation",
|
||||
|
@ -7,7 +7,7 @@
|
||||
|
||||
export let elem_id: string = "";
|
||||
export let visible: boolean = true;
|
||||
export let value: Array<[string, string]> = [];
|
||||
export let value: Array<[string | null, string | null]> = [];
|
||||
export let style: Styles = {};
|
||||
export let label: string;
|
||||
export let show_label: boolean = true;
|
||||
@ -20,7 +20,7 @@
|
||||
export let loading_status: LoadingStatus | undefined;
|
||||
</script>
|
||||
|
||||
<Block padding={false} {elem_id} {visible}>
|
||||
<Block {elem_id} {visible}>
|
||||
{#if show_label}
|
||||
<BlockLabel
|
||||
{show_label}
|
||||
|
@ -3,8 +3,8 @@
|
||||
import { colors } from "@gradio/theme";
|
||||
import type { Styles } from "@gradio/utils";
|
||||
|
||||
export let value: Array<[string, string]> | null;
|
||||
let old_value: Array<[string, string]> | null;
|
||||
export let value: Array<[string | null, string | null]> | null;
|
||||
let old_value: Array<[string | null, string | null]> | null;
|
||||
export let style: Styles = {};
|
||||
export let pending_message: boolean = false;
|
||||
|
||||
@ -63,6 +63,7 @@
|
||||
data-testid="user"
|
||||
class:latest={i === _value.length - 1}
|
||||
class="message user"
|
||||
class:hide={message[0] === null}
|
||||
style={"background-color:" + _colors[0]}
|
||||
>
|
||||
{@html message[0]}
|
||||
@ -71,6 +72,7 @@
|
||||
data-testid="bot"
|
||||
class:latest={i === _value.length - 1}
|
||||
class="message bot"
|
||||
class:hide={message[1] === null}
|
||||
style={"background-color:" + _colors[1]}
|
||||
>
|
||||
{@html message[1]}
|
||||
@ -94,7 +96,7 @@
|
||||
|
||||
<style>
|
||||
.wrap {
|
||||
height: var(--size-80);
|
||||
height: 100%;
|
||||
overflow-y: auto;
|
||||
}
|
||||
|
||||
@ -183,4 +185,8 @@
|
||||
opacity: 0.8;
|
||||
}
|
||||
}
|
||||
|
||||
.hide {
|
||||
display: none;
|
||||
}
|
||||
</style>
|
||||
|
Loading…
Reference in New Issue
Block a user