Fail CI if lint or typecheck fails (#7770)

* fail

* test

* revert

* run shebang

* fix

* fixes

* add changeset

* changes

* bash

* not required

* file

* textbox

* add changeset

* changes to demo

* notebook

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Abubakar Abid 2024-03-21 08:34:56 -07:00 committed by GitHub
parent ca42748590
commit dd3e363261
5 changed files with 39 additions and 28 deletions

View File

@@ -0,0 +1,5 @@
---
"gradio": patch
---
feat:Fail CI if lint or typecheck fails

View File

@@ -68,12 +68,12 @@ jobs:
if: runner.os == 'Linux'
run: |
. venv/bin/activate
bash scripts/lint_backend.sh
./scripts/lint_backend.sh
- name: Typecheck
if: runner.os == 'Linux'
run: |
. venv/bin/activate
bash scripts/type_check_backend.sh
./scripts/type_check_backend.sh
- name: Run tests
run: |
. ${{steps.install_deps.outputs.venv_activate}}

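Per the commit title, the point of the workflow change above is that the lint and typecheck scripts are now invoked directly, so their shebangs and exit statuses take effect and a failing check fails the job. A rough local equivalent of that gate, as an illustrative sketch only (not part of the commit; it assumes the two scripts exist and are executable, as direct invocation requires):

import subprocess
import sys

# Illustrative sketch: run the same checks locally and exit non-zero if either
# fails, mirroring the failure propagation the updated CI step relies on.
SCRIPTS = ["./scripts/lint_backend.sh", "./scripts/type_check_backend.sh"]

def run(script: str) -> int:
    # Invoke the script directly (as the workflow now does) so its shebang and
    # exit status are honoured.
    return subprocess.run([script]).returncode

if __name__ == "__main__":
    sys.exit(1 if any(run(s) != 0 for s in SCRIPTS) else 0)
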
View File

@@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x[\"path\"],), None)) \n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False, file_types=[\"image\"])\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"files/avatar.png\"))),\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True, file_types=[\"image\"], placeholder=\"Enter message or upload file...\", show_label=False)\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input], queue=False).then(\n", " bot, chatbot, chatbot, api_name=\"bot_response\"\n", " )\n", " chat_msg.then(lambda: gr.Textbox(interactive=True), None, [chat_input], queue=False)\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: chatbot_multimodal"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "os.mkdir('files')\n", "!wget -q -O files/avatar.png https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/avatar.png\n", "!wget -q -O files/lion.jpg https://github.com/gradio-app/gradio/raw/main/demo/chatbot_multimodal/files/lion.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "import time\n", "\n", "# Chatbot demo with multimodal input (text, markdown, LaTeX, code blocks, image, audio, & video). Plus shows support for streaming text.\n", "\n", "\n", "def print_like_dislike(x: gr.LikeData):\n", " print(x.index, x.value, x.liked)\n", "\n", "def add_message(history, message):\n", " for x in message[\"files\"]:\n", " history.append(((x,), None))\n", " if message[\"text\"] is not None:\n", " history.append((message[\"text\"], None))\n", " return history, gr.MultimodalTextbox(value=None, interactive=False)\n", "\n", "def bot(history):\n", " response = \"**That's cool!**\"\n", " history[-1][1] = \"\"\n", " for character in response:\n", " history[-1][1] += character\n", " time.sleep(0.05)\n", " yield history\n", "\n", "\n", "with gr.Blocks() as demo:\n", " chatbot = gr.Chatbot(\n", " [],\n", " elem_id=\"chatbot\",\n", " bubble_full_width=False,\n", " avatar_images=(None, (os.path.join(os.path.abspath(''), \"files/avatar.png\"))),\n", " )\n", "\n", " chat_input = gr.MultimodalTextbox(interactive=True, file_types=[\"image\"], placeholder=\"Enter message or upload file...\", show_label=False)\n", "\n", " chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])\n", " bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name=\"bot_response\")\n", " bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])\n", "\n", " chatbot.like(print_like_dislike, None, None)\n", "\n", "demo.queue()\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@@ -10,10 +10,10 @@ def print_like_dislike(x: gr.LikeData):
def add_message(history, message):
for x in message["files"]:
history.append(((x["path"],), None))
history.append(((x,), None))
if message["text"] is not None:
history.append((message["text"], None))
return history, gr.MultimodalTextbox(value=None, interactive=False, file_types=["image"])
return history, gr.MultimodalTextbox(value=None, interactive=False)
def bot(history):
response = "**That's cool!**"
@@ -33,10 +33,11 @@ with gr.Blocks() as demo:
)
chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"], placeholder="Enter message or upload file...", show_label=False)
chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input], queue=False).then(
bot, chatbot, chatbot, api_name="bot_response"
)
chat_msg.then(lambda: gr.Textbox(interactive=True), None, [chat_input], queue=False)
chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
bot_msg = chat_msg.then(bot, chatbot, chatbot, api_name="bot_response")
bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])
chatbot.like(print_like_dislike, None, None)
demo.queue()

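Two behavioural points in the demo diff above are easy to miss in the raw lines: each entry of message["files"] is now a plain file path rather than a dict with a "path" key, and the step that re-enables input after the bot turn now returns a gr.MultimodalTextbox update rather than a gr.Textbox one, matching the component it targets. A condensed, runnable sketch of the updated wiring (illustrative, not part of the commit):

import gradio as gr

def add_message(history, message):
    for path in message["files"]:  # plain paths after this change
        history.append(((path,), None))
    if message["text"] is not None:
        history.append((message["text"], None))
    # Clear and disable the textbox while the bot responds.
    return history, gr.MultimodalTextbox(value=None, interactive=False)

def bot(history):
    history[-1][1] = "**That's cool!**"
    return history

with gr.Blocks() as demo:
    chatbot = gr.Chatbot([])
    chat_input = gr.MultimodalTextbox(interactive=True, file_types=["image"])
    chat_msg = chat_input.submit(add_message, [chatbot, chat_input], [chatbot, chat_input])
    bot_msg = chat_msg.then(bot, chatbot, chatbot)
    # Re-enable with the matching component type.
    bot_msg.then(lambda: gr.MultimodalTextbox(interactive=True), None, [chat_input])

if __name__ == "__main__":
    demo.launch()
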
View File

@@ -3,11 +3,12 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, Callable, List, Literal, Optional, TypedDict
from typing import Any, Callable, List, Literal, TypedDict
import gradio_client.utils as client_utils
from gradio_client.documentation import document
from pydantic import Field
from typing_extensions import NotRequired
from gradio.components.base import FormComponent
from gradio.data_classes import FileData, GradioModel
@@ -15,8 +16,8 @@ from gradio.events import Events
class MultimodalData(GradioModel):
text: Optional[str] = None
files: Optional[List[FileData]] = Field(default_factory=list)
text: str
files: List[FileData] = Field(default_factory=list)
class MultimodalPostprocess(TypedDict):
@@ -24,6 +25,11 @@ class MultimodalPostprocess(TypedDict):
files: List[FileData]
class MultimodalValue(TypedDict):
text: NotRequired[str]
files: NotRequired[list[str]]
@document()
class MultimodalTextbox(FormComponent):
"""
@@ -127,18 +133,21 @@ class MultimodalTextbox(FormComponent):
self.rtl = rtl
self.text_align = text_align
def preprocess(
self, payload: MultimodalData | None
) -> dict[str, str | list] | None:
def preprocess(self, payload: MultimodalData | None) -> MultimodalValue | None:
"""
Parameters:
payload: the text and list of file(s) entered in the multimodal textbox.
Returns:
Passes text value and list of file(s) as a {dict} into the function.
"""
return None if payload is None else payload.model_dump()
if payload is None:
return None
return {
"text": payload.text,
"files": [f.path for f in payload.files],
}
def postprocess(self, value: dict[str, str | list] | None) -> MultimodalData:
def postprocess(self, value: MultimodalValue | None) -> MultimodalData:
"""
Parameters:
value: Expects a {dict} with "text" and "files", both optional. The files array is a list of file paths or URLs.
@@ -151,24 +160,20 @@ class MultimodalTextbox(FormComponent):
raise ValueError(
f"MultimodalTextbox expects a dictionary with optional keys 'text' and 'files'. Received {value.__class__.__name__}"
)
text = value.get("text", "")
if "files" in value and isinstance(value["files"], list):
value["files"] = [
files = [
file
if isinstance(file, FileData)
else FileData(
path=file["path"] if "path" in file else file,
mime_type=file["mime_type"]
if "mime_type" in file
else client_utils.get_mimetype(file),
orig_name=file["orig_name"]
if "orig_name" in file
else Path(file).name,
size=file["size"] if "size" in file else Path(file).stat().st_size,
path=file,
orig_name=Path(file).name,
mime_type=client_utils.get_mimetype(file),
)
for file in value["files"]
]
text = value.get("text", "")
files = value.get("files", [])
else:
files = []
if not isinstance(text, str):
raise TypeError(
f"Expected 'text' to be a string, but got {type(text).__name__}"