ensure ImageEditor events work as expected (#7845)

* changes

* changes

* more fix

* more fix

* add changeset

* fix initial crop

* fix format

* fix format

* fix formats

* faster?

* race condition

* fixes + test

* fix type?

* notebooks

* fix type

* change demo

* add changeset

* fix type

* fix type

* fix type again

* fix type again again

* lint

* lint again

* fix test

* tests

* fix

* tests

* address comments

* fix notebooks

* fix tests

* fix stories

* fix webcam ui

* cleanup

* add changeset

* fix input event

* add format param + fix input event

* fix test

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
This commit is contained in:
pngwn 2024-04-15 16:16:07 -04:00 committed by GitHub
parent a0c2848ab5
commit dbb7373dde
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
32 changed files with 629 additions and 148 deletions

View File

@ -0,0 +1,7 @@
---
"@gradio/client": minor
"@gradio/imageeditor": minor
"gradio": minor
---
feat:ensure `ImageEditor` events work as expected

View File

@ -1118,15 +1118,13 @@ export function api_factory(
async function component_server(
component_id: number,
fn_name: string,
data: unknown[]
data: unknown[] | { binary: boolean; data: Record<string, any> }
): Promise<any> {
const headers: {
Authorization?: string;
"Content-Type": "application/json";
} = { "Content-Type": "application/json" };
if (hf_token) {
headers.Authorization = `Bearer ${hf_token}`;
}
} = {};
let root_url: string;
let component = config.components.find(
(comp) => comp.id === component_id
@ -1136,28 +1134,53 @@ export function api_factory(
} else {
root_url = config.root;
}
const response = await fetch_implementation(
`${root_url}/component_server/`,
{
method: "POST",
body: JSON.stringify({
data: data,
component_id: component_id,
fn_name: fn_name,
session_hash: session_hash
}),
headers
}
);
if (!response.ok) {
throw new Error(
"Could not connect to component server: " + response.statusText
);
let body: FormData | string;
if (data.binary) {
body = new FormData();
for (const key in data.data) {
if (key === "binary") continue;
body.append(key, data.data[key]);
}
body.set("component_id", component_id);
body.set("fn_name", fn_name);
body.set("session_hash", session_hash);
} else {
body = JSON.stringify({
data: data,
component_id,
fn_name,
session_hash
});
headers["Content-Type"] = "application/json";
}
if (hf_token) {
headers.Authorization = `Bearer ${hf_token}`;
}
const output = await response.json();
return output;
try {
const response = await fetch_implementation(
`${root_url}/component_server/`,
{
method: "POST",
body: body,
headers
}
);
if (!response.ok) {
// console.warn(await response.text());
throw new Error(
"Could not connect to component server: " + response.statusText
);
}
const output = await response.json();
return output;
} catch (e) {
console.warn(e);
}
}
async function view_api(config?: Config): Promise<ApiInfo<JsApiData>> {

View File

@ -1 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_editor"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_editor/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "\n", "def sleep(im):\n", " time.sleep(5)\n", " return [im[\"background\"], im[\"layers\"][0], im[\"layers\"][1], im[\"composite\"]]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " im = gr.ImageEditor(\n", " type=\"pil\",\n", " crop_size=\"1:1\",\n", " )\n", "\n", " with gr.Group():\n", " with gr.Row():\n", " im_out_1 = gr.Image(type=\"pil\")\n", " im_out_2 = gr.Image(type=\"pil\")\n", " im_out_3 = gr.Image(type=\"pil\")\n", " im_out_4 = gr.Image(type=\"pil\")\n", "\n", " btn = gr.Button()\n", " im.change(sleep, outputs=[im_out_1, im_out_2, im_out_3, im_out_4], inputs=im)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_editor"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["# Downloading files from the demo repo\n", "import os\n", "!wget -q https://github.com/gradio-app/gradio/raw/main/demo/image_editor/cheetah.jpg"]}, {"cell_type": "code", "execution_count": null, "id": "44380577570523278879349135829904343037", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import time\n", "\n", "\n", "def sleep(im):\n", " time.sleep(5)\n", " return [im[\"background\"], im[\"layers\"][0], im[\"layers\"][1], im[\"composite\"]]\n", "\n", "\n", "def predict(im):\n", " return im[\"composite\"]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Row():\n", " im = gr.ImageEditor(\n", " type=\"numpy\",\n", " crop_size=\"1:1\",\n", " )\n", " im_preview = gr.Image()\n", " n_upload = gr.Number(0, label=\"Number of upload events\", step=1)\n", " n_change = gr.Number(0, label=\"Number of change events\", step=1)\n", " n_input = gr.Number(0, label=\"Number of input events\", step=1)\n", "\n", " im.upload(lambda x: x + 1, outputs=n_upload, inputs=n_upload)\n", " im.change(lambda x: x + 1, outputs=n_change, inputs=n_change)\n", " im.input(lambda x: x + 1, outputs=n_input, inputs=n_input)\n", " im.change(predict, outputs=im_preview, inputs=im, show_progress=\"hidden\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -7,21 +7,25 @@ def sleep(im):
return [im["background"], im["layers"][0], im["layers"][1], im["composite"]]
def predict(im):
return im["composite"]
with gr.Blocks() as demo:
im = gr.ImageEditor(
type="pil",
crop_size="1:1",
)
with gr.Row():
im = gr.ImageEditor(
type="numpy",
crop_size="1:1",
)
im_preview = gr.Image()
n_upload = gr.Number(0, label="Number of upload events", step=1)
n_change = gr.Number(0, label="Number of change events", step=1)
n_input = gr.Number(0, label="Number of input events", step=1)
with gr.Group():
with gr.Row():
im_out_1 = gr.Image(type="pil")
im_out_2 = gr.Image(type="pil")
im_out_3 = gr.Image(type="pil")
im_out_4 = gr.Image(type="pil")
btn = gr.Button()
im.change(sleep, outputs=[im_out_1, im_out_2, im_out_3, im_out_4], inputs=im)
im.upload(lambda x: x + 1, outputs=n_upload, inputs=n_upload)
im.change(lambda x: x + 1, outputs=n_change, inputs=n_change)
im.input(lambda x: x + 1, outputs=n_input, inputs=n_input)
im.change(predict, outputs=im_preview, inputs=im, show_progress="hidden")
if __name__ == "__main__":
demo.launch()

View File

@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: image_editor_events"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "\n", "def predict(im):\n", " return im[\"composite\"]\n", "\n", "\n", "with gr.Blocks() as demo:\n", " with gr.Group():\n", " with gr.Row():\n", " im = gr.ImageEditor(\n", " type=\"numpy\",\n", " crop_size=\"1:1\",\n", " elem_id=\"image_editor\",\n", " )\n", " im_preview = gr.Image()\n", " with gr.Group():\n", " with gr.Row():\n", "\n", " n_upload = gr.Label(\n", " 0,\n", " label=\"upload\",\n", " elem_id=\"upload\",\n", " )\n", " n_change = gr.Label(\n", " 0,\n", " label=\"change\",\n", " elem_id=\"change\",\n", " )\n", " n_input = gr.Label(\n", " 0,\n", " label=\"input\",\n", " elem_id=\"input\",\n", " )\n", " n_apply = gr.Label(\n", " 0,\n", " label=\"apply\",\n", " elem_id=\"apply\",\n", " )\n", " clear_btn = gr.Button(\"Clear\", elem_id=\"clear\")\n", "\n", " im.upload(\n", " lambda x: int(x) + 1, outputs=n_upload, inputs=n_upload, show_progress=\"hidden\"\n", " )\n", " im.change(\n", " lambda x: int(x) + 1, outputs=n_change, inputs=n_change, show_progress=\"hidden\"\n", " )\n", " im.input(\n", " lambda x: int(x) + 1, outputs=n_input, inputs=n_input, show_progress=\"hidden\"\n", " )\n", " im.apply(\n", " lambda x: int(x) + 1, outputs=n_apply, inputs=n_apply, show_progress=\"hidden\"\n", " )\n", " im.change(predict, outputs=im_preview, inputs=im, show_progress=\"hidden\")\n", " clear_btn.click(\n", " lambda: None,\n", " None,\n", " im,\n", " )\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -0,0 +1,62 @@
import gradio as gr
def predict(im):
return im["composite"]
with gr.Blocks() as demo:
with gr.Group():
with gr.Row():
im = gr.ImageEditor(
type="numpy",
crop_size="1:1",
elem_id="image_editor",
)
im_preview = gr.Image()
with gr.Group():
with gr.Row():
n_upload = gr.Label(
0,
label="upload",
elem_id="upload",
)
n_change = gr.Label(
0,
label="change",
elem_id="change",
)
n_input = gr.Label(
0,
label="input",
elem_id="input",
)
n_apply = gr.Label(
0,
label="apply",
elem_id="apply",
)
clear_btn = gr.Button("Clear", elem_id="clear")
im.upload(
lambda x: int(x) + 1, outputs=n_upload, inputs=n_upload, show_progress="hidden"
)
im.change(
lambda x: int(x) + 1, outputs=n_change, inputs=n_change, show_progress="hidden"
)
im.input(
lambda x: int(x) + 1, outputs=n_input, inputs=n_input, show_progress="hidden"
)
im.apply(
lambda x: int(x) + 1, outputs=n_apply, inputs=n_apply, show_progress="hidden"
)
im.change(predict, outputs=im_preview, inputs=im, show_progress="hidden")
clear_btn.click(
lambda: None,
None,
im,
)
if __name__ == "__main__":
demo.launch()

View File

@ -43,13 +43,15 @@ class AnnotatedImage(Component):
def __init__(
self,
value: tuple[
np.ndarray | PIL.Image.Image | str,
list[tuple[np.ndarray | tuple[int, int, int, int], str]],
]
| None = None,
value: (
tuple[
np.ndarray | PIL.Image.Image | str,
list[tuple[np.ndarray | tuple[int, int, int, int], str]],
]
| None
) = None,
*,
format: str = "png",
format: str = "webp",
show_legend: bool = True,
height: int | str | None = None,
width: int | str | None = None,
@ -120,11 +122,13 @@ class AnnotatedImage(Component):
def postprocess(
self,
value: tuple[
np.ndarray | PIL.Image.Image | str,
list[tuple[np.ndarray | tuple[int, int, int, int], str]],
]
| None,
value: (
tuple[
np.ndarray | PIL.Image.Image | str,
list[tuple[np.ndarray | tuple[int, int, int, int], str]],
]
| None
),
) -> AnnotatedImageData | None:
"""
Parameters:

View File

@ -147,9 +147,10 @@ class Component(ComponentBase, Block):
every: float | None = None,
):
self.server_fns = [
value
for value in self.__class__.__dict__.values()
if callable(value) and getattr(value, "_is_server_fn", False)
getattr(self, value)
for value in dir(self.__class__)
if callable(getattr(self, value))
and getattr(getattr(self, value), "_is_server_fn", False)
]
# Svelte components expect elem_classes to be a list

View File

@ -46,11 +46,11 @@ class Gallery(Component):
def __init__(
self,
value: list[np.ndarray | PIL.Image.Image | str | Path | tuple]
| Callable
| None = None,
value: (
list[np.ndarray | PIL.Image.Image | str | Path | tuple] | Callable | None
) = None,
*,
format: str = "png",
format: str = "webp",
label: str | None = None,
every: float | None = None,
show_label: bool | None = None,
@ -67,8 +67,9 @@ class Gallery(Component):
allow_preview: bool = True,
preview: bool | None = None,
selected_index: int | None = None,
object_fit: Literal["contain", "cover", "fill", "none", "scale-down"]
| None = None,
object_fit: (
Literal["contain", "cover", "fill", "none", "scale-down"] | None
) = None,
show_share_button: bool | None = None,
show_download_button: bool | None = True,
interactive: bool | None = None,

View File

@ -43,7 +43,7 @@ class Image(StreamingInput, Component):
self,
value: str | PIL.Image.Image | np.ndarray | None = None,
*,
format: str = "png",
format: str = "webp",
height: int | str | None = None,
width: int | str | None = None,
image_mode: Literal[
@ -163,7 +163,7 @@ class Image(StreamingInput, Component):
suffix = "jpeg"
else:
name = "image"
suffix = "png"
suffix = "webp"
if suffix.lower() == "svg":
return str(file_path)

View File

@ -4,16 +4,18 @@ from __future__ import annotations
import dataclasses
import warnings
from io import BytesIO
from pathlib import Path
from typing import Any, Iterable, List, Literal, Optional, TypedDict, Union, cast
from typing import Any, Iterable, List, Literal, Optional, Tuple, Union, cast
import numpy as np
import PIL.Image
from gradio_client import file
from gradio_client.documentation import document
from typing_extensions import TypedDict
from gradio import image_utils, utils
from gradio.components.base import Component
from gradio.components.base import Component, server
from gradio.data_classes import FileData, GradioModel
from gradio.events import Events
@ -28,7 +30,7 @@ class EditorValue(TypedDict):
class EditorExampleValue(TypedDict):
background: Optional[str]
layers: Optional[list[str | None]]
layers: Optional[list[Union[str, None]]]
composite: Optional[str]
@ -36,6 +38,25 @@ class EditorData(GradioModel):
background: Optional[FileData] = None
layers: List[FileData] = []
composite: Optional[FileData] = None
id: Optional[str] = None
class EditorDataBlobs(GradioModel):
background: Optional[bytes]
layers: List[Union[bytes, None]]
composite: Optional[bytes]
class BlobData(TypedDict):
type: str
index: Optional[int]
file: bytes
id: str
class AcceptBlobs(GradioModel):
data: BlobData
files: List[Tuple[str, bytes]]
@dataclasses.dataclass
@ -95,9 +116,12 @@ class ImageEditor(Component):
EVENTS = [
Events.clear,
Events.change,
Events.input,
Events.select,
Events.upload,
Events.apply,
]
data_model = EditorData
def __init__(
@ -134,6 +158,7 @@ class ImageEditor(Component):
transforms: Iterable[Literal["crop"]] = ("crop",),
eraser: Eraser | None | Literal[False] = None,
brush: Brush | None | Literal[False] = None,
format: str = "webp",
):
"""
Parameters:
@ -161,6 +186,8 @@ class ImageEditor(Component):
transforms: The transforms tools to make available to users. "crop" allows the user to crop the image.
eraser: The options for the eraser tool in the image editor. Should be an instance of the `gr.Eraser` class, or None to use the default settings. Can also be False to hide the eraser tool.
brush: The options for the brush tool in the image editor. Should be an instance of the `gr.Brush` class, or None to use the default settings. Can also be False to hide the brush tool, which will also hide the eraser tool.
format: Format to save image if it does not already have a valid format (e.g. if the image is being returned to the frontend as a numpy array or PIL Image). The format should be supported by the PIL library. This parameter has no effect on SVG files.
"""
self._selectable = _selectable
self.mirror_webcam = mirror_webcam
@ -195,6 +222,8 @@ class ImageEditor(Component):
self.transforms = transforms
self.eraser = Eraser() if eraser is None else eraser
self.brush = Brush() if brush is None else brush
self.blob_storage: dict[str, EditorDataBlobs] = {}
self.format = format
super().__init__(
label=label,
@ -213,12 +242,19 @@ class ImageEditor(Component):
def convert_and_format_image(
self,
file: FileData | None,
file: FileData | None | bytes,
) -> np.ndarray | PIL.Image.Image | str | None:
if file is None:
return None
im = PIL.Image.open(file.path)
if file.orig_name:
im = (
PIL.Image.open(file.path)
if isinstance(file, FileData)
else PIL.Image.open(BytesIO(file))
)
if isinstance(file, (bytes, bytearray, memoryview)):
name = "image"
suffix = self.format
elif file.orig_name:
p = Path(file.orig_name)
name = p.stem
suffix = p.suffix.replace(".", "")
@ -226,7 +262,7 @@ class ImageEditor(Component):
suffix = "jpeg"
else:
name = "image"
suffix = "png"
suffix = self.format
with warnings.catch_warnings():
warnings.simplefilter("ignore")
im = im.convert(self.image_mode)
@ -249,16 +285,41 @@ class ImageEditor(Component):
Returns:
Passes the uploaded images as an instance of EditorValue, which is just a `dict` with keys: 'background', 'layers', and 'composite'. The values corresponding to 'background' and 'composite' are images, while 'layers' is a `list` of images. The images are of type `PIL.Image`, `np.array`, or `str` filepath, depending on the `type` parameter.
"""
if payload is None:
return payload
_payload = payload
if payload is not None and payload.id is not None:
cached = self.blob_storage.get(payload.id)
_payload = (
EditorDataBlobs(
background=cached.background,
layers=cached.layers,
composite=cached.composite,
)
if cached
else None
)
elif _payload is None:
return _payload
else:
_payload = payload
bg = None
layers = None
composite = None
if _payload is not None:
bg = self.convert_and_format_image(_payload.background)
layers = (
[self.convert_and_format_image(layer) for layer in _payload.layers]
if _payload.layers
else None
)
composite = self.convert_and_format_image(_payload.composite)
if payload is not None and payload.id is not None:
self.blob_storage.pop(payload.id)
bg = self.convert_and_format_image(payload.background)
layers = (
[self.convert_and_format_image(layer) for layer in payload.layers]
if payload.layers
else None
)
composite = self.convert_and_format_image(payload.composite)
return {
"background": bg,
"layers": [x for x in layers if x is not None] if layers else [],
@ -289,7 +350,7 @@ class ImageEditor(Component):
path=image_utils.save_image(
cast(Union[np.ndarray, PIL.Image.Image, str], layer),
self.GRADIO_CACHE,
format="png",
format=self.format,
)
)
for layer in value["layers"]
@ -299,20 +360,29 @@ class ImageEditor(Component):
)
return EditorData(
background=FileData(
path=image_utils.save_image(value["background"], self.GRADIO_CACHE)
)
if value["background"] is not None
else None,
layers=layers,
composite=FileData(
path=image_utils.save_image(
cast(Union[np.ndarray, PIL.Image.Image, str], value["composite"]),
self.GRADIO_CACHE,
background=(
FileData(
path=image_utils.save_image(
value["background"], self.GRADIO_CACHE, format=self.format
)
)
)
if value["composite"] is not None
else None,
if value["background"] is not None
else None
),
layers=layers,
composite=(
FileData(
path=image_utils.save_image(
cast(
Union[np.ndarray, PIL.Image.Image, str], value["composite"]
),
self.GRADIO_CACHE,
format=self.format,
)
)
if value["composite"] is not None
else None
),
)
def example_payload(self) -> Any:
@ -330,3 +400,33 @@ class ImageEditor(Component):
"layers": [],
"composite": None,
}
@server
def accept_blobs(self, data: AcceptBlobs):
"""
Accepts a dictionary of image blobs, where the keys are 'background', 'layers', and 'composite', and the values are binary file-like objects.
"""
type = data.data["type"]
index = (
int(data.data["index"])
if data.data["index"] and data.data["index"] != "null"
else None
)
file = data.files[0][1]
id = data.data["id"]
current = self.blob_storage.get(
id, EditorDataBlobs(background=None, layers=[], composite=None)
)
if type == "layer" and index is not None:
if index >= len(current.layers):
current.layers.extend([None] * (index + 1 - len(current.layers)))
current.layers[index] = file
elif type == "background":
current.background = file
elif type == "composite":
current.composite = file
self.blob_storage[id] = current

View File

@ -41,7 +41,7 @@ class Plot(Component):
self,
value: Any | None = None,
*,
format: str = "png",
format: str = "webp",
label: str | None = None,
every: float | None = None,
show_label: bool | None = None,

View File

@ -1,5 +1,6 @@
"""Pydantic data models and other dataclasses. This is the only file that uses Optional[]
typing syntax instead of | None syntax to work with pydantic"""
from __future__ import annotations
import pathlib
@ -7,7 +8,7 @@ import secrets
import shutil
from abc import ABC, abstractmethod
from enum import Enum, auto
from typing import TYPE_CHECKING, Any, List, Optional, Union
from typing import TYPE_CHECKING, Any, List, Optional, Tuple, Union
from fastapi import Request
from gradio_client.utils import traverse
@ -101,13 +102,25 @@ class ResetBody(BaseModel):
event_id: str
class ComponentServerBody(BaseModel):
class ComponentServerJSONBody(BaseModel):
session_hash: str
component_id: int
fn_name: str
data: Any
class DataWithFiles(BaseModel):
data: Any
files: List[Tuple[str, bytes]]
class ComponentServerBlobBody(BaseModel):
session_hash: str
component_id: int
fn_name: str
data: DataWithFiles
class InterfaceTypes(Enum):
STANDARD = auto()
INPUT_ONLY = auto()

View File

@ -539,6 +539,10 @@ class Events:
"key_up",
doc="This listener is triggered when the user presses a key while the {{ component }} is focused.",
)
apply = EventListener(
"apply",
doc="This listener is triggered when the user applies changes to the {{ component }} through an integrated UI action.",
)
class LikeData(EventData):

View File

@ -1,6 +1,7 @@
"""
Defines helper methods useful for loading and caching Interface examples.
"""
from __future__ import annotations
import ast

View File

@ -16,7 +16,7 @@ def format_image(
type: Literal["numpy", "pil", "filepath"],
cache_dir: str,
name: str = "image",
format: str = "png",
format: str = "webp",
) -> np.ndarray | PIL.Image.Image | str | None:
"""Helper method to format an image based on self.type"""
if im is None:
@ -48,10 +48,8 @@ def format_image(
def save_image(
y: np.ndarray | PIL.Image.Image | str | Path, cache_dir: str, format: str = "png"
y: np.ndarray | PIL.Image.Image | str | Path, cache_dir: str, format: str = "webp"
):
# numpy gets saved to png as default format
# PIL gets saved to its original format if possible
if isinstance(y, np.ndarray):
path = processing_utils.save_img_array_to_cache(
y, cache_dir=cache_dir, format=format

View File

@ -148,7 +148,7 @@ def save_pil_to_cache(
img: Image.Image,
cache_dir: str,
name: str = "image",
format: str = "png",
format: str = "webp",
) -> str:
bytes_data = encode_pil_to_bytes(img, format)
temp_dir = Path(cache_dir) / hash_bytes(bytes_data)
@ -159,7 +159,7 @@ def save_pil_to_cache(
def save_img_array_to_cache(
arr: np.ndarray, cache_dir: str, format: str = "png"
arr: np.ndarray, cache_dir: str, format: str = "webp"
) -> str:
pil_image = Image.fromarray(_convert(arr, np.uint8, force_copy=False))
return save_pil_to_cache(pil_image, cache_dir, format=format)

View File

@ -31,13 +31,20 @@ from typing import (
List,
Optional,
Type,
Union,
)
import fastapi
import httpx
import markupsafe
import orjson
from fastapi import BackgroundTasks, Depends, FastAPI, HTTPException, status
from fastapi import (
BackgroundTasks,
Depends,
FastAPI,
HTTPException,
status,
)
from fastapi.responses import (
FileResponse,
HTMLResponse,
@ -52,13 +59,16 @@ from gradio_client.utils import ServerMessage
from jinja2.exceptions import TemplateNotFound
from multipart.multipart import parse_options_header
from starlette.background import BackgroundTask
from starlette.datastructures import UploadFile as StarletteUploadFile
from starlette.responses import RedirectResponse, StreamingResponse
import gradio
from gradio import ranged_response, route_utils, utils, wasm_utils
from gradio.context import Context
from gradio.data_classes import (
ComponentServerBody,
ComponentServerBlobBody,
ComponentServerJSONBody,
DataWithFiles,
PredictBody,
ResetBody,
SimplePredictBody,
@ -889,9 +899,71 @@ class App(FastAPI):
media_type="text/event-stream",
)
@app.post("/component_server", dependencies=[Depends(login_check)])
@app.post("/component_server/", dependencies=[Depends(login_check)])
def component_server(body: ComponentServerBody):
async def get_item_or_file(
request: fastapi.Request,
) -> Union[ComponentServerJSONBody, ComponentServerBlobBody]:
content_type = request.headers.get("Content-Type")
if isinstance(content_type, str) and content_type.startswith(
"multipart/form-data"
):
files = []
data = {}
async with request.form() as form:
for key, value in form.items():
if (
isinstance(value, list)
and len(value) > 1
and isinstance(value[0], StarletteUploadFile)
):
for i, v in enumerate(value):
if isinstance(v, StarletteUploadFile):
filename = v.filename
contents = await v.read()
files.append((filename, contents))
else:
data[f"{key}-{i}"] = v
elif isinstance(value, StarletteUploadFile):
filename = value.filename
contents = await value.read()
files.append((filename, contents))
else:
data[key] = value
return ComponentServerBlobBody(
data=DataWithFiles(data=data, files=files),
component_id=data["component_id"],
session_hash=data["session_hash"],
fn_name=data["fn_name"],
)
else:
try:
data = await request.json()
return ComponentServerJSONBody(
data=data["data"],
component_id=data["component_id"],
session_hash=data["session_hash"],
fn_name=data["fn_name"],
)
except Exception:
raise HTTPException(
status_code=status.HTTP_400_BAD_REQUEST,
detail="Invalid JSON body.",
) from None
@app.post(
"/component_server",
dependencies=[Depends(login_check)],
)
@app.post(
"/component_server/",
dependencies=[Depends(login_check)],
)
async def component_server(
request: fastapi.Request,
):
body = await get_item_or_file(request)
state = app.state_holder[body.session_hash]
component_id = body.component_id
block: Block

View File

@ -107,6 +107,7 @@ class Sketchpad(components.ImageEditor):
transforms: Iterable[Literal["crop"]] = ("crop",),
eraser: Eraser | None = None,
brush: Brush | None = None,
format: str = "webp",
):
if not brush:
brush = Brush(colors=["#000000"], color_mode="fixed")
@ -136,6 +137,7 @@ class Sketchpad(components.ImageEditor):
transforms=transforms,
eraser=eraser,
brush=brush,
format=format,
)
@ -176,6 +178,7 @@ class Paint(components.ImageEditor):
transforms: Iterable[Literal["crop"]] = ("crop",),
eraser: Eraser | None = None,
brush: Brush | None = None,
format: str = "webp",
):
super().__init__(
value=value,
@ -203,6 +206,7 @@ class Paint(components.ImageEditor):
transforms=transforms,
eraser=eraser,
brush=brush,
format=format,
)
@ -247,6 +251,7 @@ class ImageMask(components.ImageEditor):
transforms: Iterable[Literal["crop"]] = ("crop",),
eraser: Eraser | None = None,
brush: Brush | None = None,
format: str = "webp",
):
if not brush:
brush = Brush(colors=["#000000"], color_mode="fixed")
@ -276,6 +281,7 @@ class ImageMask(components.ImageEditor):
transforms=transforms,
eraser=eraser,
brush=brush,
format=format,
)
@ -288,11 +294,9 @@ class PlayableVideo(components.Video):
def __init__(
self,
value: str
| Path
| tuple[str | Path, str | Path | None]
| Callable
| None = None,
value: (
str | Path | tuple[str | Path, str | Path | None] | Callable | None
) = None,
*,
format: Literal["mp4"] = "mp4",
sources: list[Literal["upload", "webcam"]] | None = None,

View File

@ -0,0 +1,69 @@
import { test, expect, drag_and_drop_file } from "@gradio/tootils";
test("upload events work as expected", async ({ page }) => {
await page.getByLabel("Upload button").first().click();
const uploader = page.locator("input[type=file]").first();
await uploader.setInputFiles(["./test/files/cheetah1.jpg"]);
await expect(page.locator("#upload h2")).toContainText("1");
});
test("change events work as expected", async ({ page }) => {
await page.getByLabel("Upload button").first().click();
const uploader = page.locator("input[type=file]").first();
await uploader.setInputFiles(["./test/files/cheetah1.jpg"]);
const change_text = page.locator("#change h2");
await expect(change_text).toContainText("1");
await page.getByLabel("Draw button").first().click();
const canvas = page.locator("#image_editor canvas").first();
await canvas.click({ position: { x: 100, y: 100 } });
await expect(change_text).toContainText("2");
await page.getByLabel("Erase button").first().click();
await canvas.click({ position: { x: 100, y: 100 } });
await expect(change_text).toContainText("3");
await page.getByLabel("Clear canvas").first().click();
await expect(change_text).toContainText("4");
});
test("input events work as expected", async ({ page }) => {
await page.getByLabel("Upload button").first().click();
const uploader = page.locator("input[type=file]").first();
await uploader.setInputFiles(["./test/files/cheetah1.jpg"]);
const input_text = page.locator("#input h2");
await expect(input_text).toContainText("1");
await page.getByLabel("Draw button").first().click();
const canvas = page.locator("#image_editor canvas").first();
await canvas.click({ position: { x: 100, y: 100 } });
await expect(input_text).toContainText("2");
await page.getByLabel("Erase button").first().click();
await canvas.click({ position: { x: 100, y: 100 } });
await expect(input_text).toContainText("3");
await page.getByLabel("Clear canvas").first().click();
await expect(input_text).toContainText("4");
});
test("apply events work as expected", async ({ page }) => {
const apply_text = page.locator("#apply h2");
const apply_button = page.getByLabel("Save changes").first();
await page.getByLabel("Draw button").first().click();
const canvas = page.locator("#image_editor canvas").first();
await canvas.click({ position: { x: 100, y: 100 } });
await apply_button.click();
await expect(apply_text).toContainText("1");
await page.getByLabel("Erase button").first().click();
await canvas.click({ position: { x: 100, y: 100 } });
await page.getByLabel("Clear canvas").first().click();
await apply_button.click();
await expect(apply_text).toContainText("2");
});

View File

@ -26,7 +26,11 @@
class="image-container"
style="width: 500px; position: relative;border-radius: var(--radius-lg);overflow: hidden;"
>
<ImageEditor i18n={get(format)} {...args} />
<ImageEditor
i18n={get(format)}
{...args}
server={{ accept_blobs: () => {} }}
/>
</div>
</Template>

View File

@ -1,4 +1,4 @@
<svelte:options accessors={true} />
<svelte:options accessors={true} immutable={true} />
<script lang="ts">
import type { Brush, Eraser } from "./shared/tools/Brush.svelte";
@ -13,6 +13,7 @@
import { Block } from "@gradio/atoms";
import { StatusTracker } from "@gradio/statustracker";
import type { LoadingStatus } from "@gradio/statustracker";
import { tick } from "svelte";
export let elem_id = "";
export let elem_classes: string[] = [];
@ -26,6 +27,7 @@
export let show_label: boolean;
export let show_download_button: boolean;
export let root: string;
export let value_is_output = false;
export let height: number | undefined;
export let width: number | undefined;
@ -49,14 +51,17 @@
export let transforms: "crop"[] = ["crop"];
export let attached_events: string[] = [];
export let server: {
accept_blobs: (a: any) => void;
};
export let gradio: Gradio<{
change: never;
error: string;
input: never;
edit: never;
stream: never;
drag: never;
apply: never;
upload: never;
clear: never;
select: SelectData;
@ -64,8 +69,14 @@
}>;
let editor_instance: InteractiveImageEditor;
let image_id: null | string = null;
export async function get_value(): Promise<ImageBlobs> {
export async function get_value(): Promise<ImageBlobs | { id: string }> {
if (image_id) {
const val = { id: image_id };
image_id = null;
return val;
}
// @ts-ignore
loading_status = { status: "pending" };
const blobs = await editor_instance.get_data();
@ -89,8 +100,15 @@
}
function handle_save(): void {
gradio.dispatch("apply");
}
function handle_history_change(): void {
gradio.dispatch("change");
gradio.dispatch("input");
if (!value_is_output) {
gradio.dispatch("input");
tick().then((_) => (value_is_output = false));
}
}
</script>
@ -149,6 +167,8 @@
/>
<InteractiveImageEditor
on:change={() => handle_history_change()}
bind:image_id
{crop_size}
{value}
bind:this={editor_instance}
@ -159,10 +179,8 @@
on:save={(e) => handle_save()}
on:edit={() => gradio.dispatch("edit")}
on:clear={() => gradio.dispatch("clear")}
on:stream={() => gradio.dispatch("stream")}
on:drag={({ detail }) => (dragging = detail)}
on:upload={() => gradio.dispatch("upload")}
on:select={({ detail }) => gradio.dispatch("select", detail)}
on:share={({ detail }) => gradio.dispatch("share", detail)}
on:error={({ detail }) => {
loading_status = loading_status || {};
@ -172,9 +190,10 @@
on:error
{brush}
{eraser}
changeable={attached_events.includes("change")}
changeable={attached_events.includes("apply")}
i18n={gradio.i18n}
{transforms}
accept_blobs={server.accept_blobs}
></InteractiveImageEditor>
</Block>
{/if}

View File

@ -69,6 +69,7 @@
const dispatch = createEventDispatcher<{
clear?: never;
save: void;
change: void;
}>();
export let crop_constraint = false;
@ -103,6 +104,8 @@
const { can_redo, can_undo, current_history } = CommandManager;
$: $current_history.previous, dispatch("change");
$: {
history = !!$current_history.previous || $active_tool !== "bg";
}
@ -363,9 +366,6 @@
margin-bottom: var(--size-1);
}
.bg {
}
.image-container {
display: flex;
height: 100%;

View File

@ -41,9 +41,12 @@
composite: null
};
export let transforms: "crop"[] = ["crop"];
export let accept_blobs: (a: any) => void;
const dispatch = createEventDispatcher<{
clear?: never;
upload?: never;
change?: never;
}>();
let editor: ImageEditor;
@ -56,6 +59,8 @@
return !!o;
}
$: if (bg) dispatch("upload");
export async function get_data(): Promise<ImageBlobs> {
const blobs = await editor.get_blobs();
@ -107,11 +112,82 @@
let bg = false;
let history = false;
export let image_id: null | string = null;
$: editor &&
editor.set_tool &&
(sources && sources.length
? editor.set_tool("bg")
: editor.set_tool("draw"));
type BinaryImages = [string, string, File, number | null][];
// Yields control back to the browser for ~30ms before continuing.
// Used below to let the canvas/editor settle before serializing blobs and
// between debounced upload passes. NOTE(review): 30ms is an empirical delay,
// not tied to an actual animation frame — requestAnimationFrame may be more
// accurate if frame alignment is the intent; confirm with original authors.
function nextframe(): Promise<void> {
return new Promise((resolve) => setTimeout(() => resolve(), 30));
}
let uploading = false;
let pending = false;
// Serializes the current editor state (background, composite, and per-index
// layer blobs) and streams each image to the backend via `accept_blobs`,
// tagging the whole batch with a single random `image_id`. Setting `image_id`
// lets the component's `get_value` short-circuit and return `{ id }` instead
// of re-serializing. Dispatches "change" once the upload batch completes.
//
// Re-entrancy: if a change event arrives while an upload is in flight, it is
// coalesced into a single `pending` flag and one follow-up pass runs after the
// current one finishes (intermediate events are intentionally dropped — only
// the latest editor state is re-read via `editor.get_blobs()`).
async function handle_change(e: CustomEvent<Blob | any>): Promise<void> {
if (uploading) {
// An upload is already running; remember that state changed and bail.
pending = true;
return;
}
uploading = true;
// Let the editor finish the current frame before snapshotting its canvases.
await nextframe();
const blobs = await editor.get_blobs();
const images: BinaryImages = [];
// Random batch id shared by every blob in this snapshot.
let id = Math.random().toString(36).substring(2);
if (blobs.background)
images.push([
id,
"background",
new File([blobs.background], "background.png"),
// index is only meaningful for layers; null for background/composite.
null
]);
if (blobs.composite)
images.push([
id,
"composite",
new File([blobs.composite], "composite.png"),
null
]);
blobs.layers.forEach((layer, i) => {
// Layers may be sparse; skip empty slots but keep the original index.
if (layer)
images.push([
id as string,
`layer`,
new File([layer], `layer_${i}.png`),
i
]);
});
// Upload all blobs in parallel; each call is a component_server request
// with `binary: true` so the file travels as multipart data.
await Promise.all(
images.map(async ([image_id, type, data, index]) => {
return accept_blobs({
binary: true,
data: { file: data, id: image_id, type, index }
});
})
);
image_id = id;
dispatch("change");
// Small settle delay before clearing the flag to avoid a race with
// events fired synchronously by the dispatch above.
await nextframe();
uploading = false;
if (pending) {
// A change arrived mid-upload: run one more pass with the latest state.
// NOTE(review): `uploading = false` here is redundant (already cleared
// above, and the recursive call re-sets it) — harmless but dead.
pending = false;
uploading = false;
handle_change(e);
}
}
let active_mode: "webcam" | "color" | null = null;
</script>
<BlockLabel
@ -123,6 +199,7 @@
bind:this={editor}
{changeable}
on:save
on:change={handle_change}
on:clear={() => dispatch("clear")}
bind:history
bind:bg
@ -136,6 +213,7 @@
{root}
{sources}
bind:bg
bind:active_mode
background_file={value?.background || null}
></Sources>
{/if}
@ -159,7 +237,7 @@
<Layers layer_files={value?.layers || null} />
{#if !bg && !history}
{#if !bg && !history && active_mode !== "webcam"}
<div class="empty wrap">
{#if sources && sources.length}
<div>Upload an image</div>

View File

@ -68,14 +68,17 @@
t_p = y_offset / $editor_box.child_height;
c = crop_canvas($pixi!.renderer, $pixi!.mask_container, crop, 0.2);
c.start(...$dimensions, current_crop);
c.start(...$dimensions, current_crop, false);
c.continue([
l_p * $dimensions[0],
t_p * $dimensions[1],
w_p * $dimensions[0],
h_p * $dimensions[1]
]);
c.continue(
[
l_p * $dimensions[0],
t_p * $dimensions[1],
w_p * $dimensions[0],
h_p * $dimensions[1]
],
false
);
c.stop();
c.execute();

View File

@ -1,5 +1,5 @@
<script lang="ts">
import { getContext, onMount, tick } from "svelte";
import { getContext, onMount, tick, createEventDispatcher } from "svelte";
import { type ToolContext, TOOL_KEY } from "./Tools.svelte";
import { type EditorContext, EDITOR_KEY } from "../ImageEditor.svelte";
import {
@ -28,9 +28,13 @@
const { pixi, dimensions, register_context, reset, editor_box } =
getContext<EditorContext>(EDITOR_KEY);
let active_mode: "webcam" | "color" | null = null;
export let active_mode: "webcam" | "color" | null = null;
let background: Blob | File | null;
const dispatch = createEventDispatcher<{
upload: never;
}>();
const sources_meta = {
upload: {
icon: UploadIcon,
@ -203,7 +207,6 @@
width: 100%;
left: 0;
right: 0;
background-color: rgba(0, 0, 0, 0.9);
margin: auto;
z-index: var(--layer-top);
display: flex;

View File

@ -67,6 +67,7 @@
};
$: sub_menu = $active_tool && metas[$active_tool];
let current_color = writable("#000000");
let sub_tool: upload_tool | transform_tool | brush_tool | eraser_tool | null;
const tool_context: ToolContext = {

View File

@ -14,10 +14,14 @@ export interface CropCommand extends Command {
width: number,
height: number,
previous_crop: [number, number, number, number],
preview?: boolean,
set_previous?: boolean
) => void;
stop: () => number;
continue: (crop_size: [number, number, number, number]) => void;
continue: (
crop_size: [number, number, number, number],
preview?: boolean
) => void;
}
export function crop_canvas(
@ -74,6 +78,7 @@ export function crop_canvas(
_width: number,
_height: number,
_previous_crop: [number, number, number, number],
_preview = true,
set_previous = true
) => {
clean = false;
@ -82,7 +87,7 @@ export function crop_canvas(
height: _height
});
crop_mask(_width, _height, _previous_crop, true);
crop_mask(_width, _height, _previous_crop, _preview);
sprite = new Sprite(text);
mask_container.mask = sprite;
width = _width;
@ -90,10 +95,10 @@ export function crop_canvas(
if (set_previous)
previous_crop = JSON.parse(JSON.stringify(_previous_crop));
},
continue: (crop_size: [number, number, number, number]) => {
continue: (crop_size: [number, number, number, number], preview = true) => {
final_crop = JSON.parse(JSON.stringify(crop_size));
if (spring_value === 0.2) {
crop_mask(width, height, final_crop, true);
crop_mask(width, height, final_crop, preview);
} else {
alpha_spring.set(0.2);
}

View File

@ -25,6 +25,7 @@ def copy_all_demos(source_dir: str, dest_dir: str):
"fake_diffusion_with_gif",
"file_explorer_component_events",
"image_mod_default_image",
"image_editor_events",
"image_segmentation",
"interface_random_slider",
"kitchen_sink",

View File

@ -638,6 +638,8 @@ class TestImageEditor:
},
"proxy_url": None,
"name": "imageeditor",
"server_fns": ["accept_blobs"],
"format": "webp",
}
def test_process_example(self):
@ -684,7 +686,7 @@ class TestImage:
"visible": True,
"value": None,
"interactive": None,
"format": "png",
"format": "webp",
"proxy_url": None,
"mirror_webcam": True,
"_selectable": False,
@ -722,7 +724,7 @@ class TestImage:
return np.random.randint(0, 256, (height, width, 3))
iface = gr.Interface(generate_noise, ["slider", "slider"], "image")
assert iface(10, 20).endswith(".png")
assert iface(10, 20).endswith(".webp")
def test_static(self):
"""
@ -754,7 +756,7 @@ class TestImage:
assert image.path.endswith("jpeg")
image_pre = component.preprocess(FileData(path=file_path))
assert image_pre.endswith("png")
assert image_pre.endswith("webp")
image_pre = component.preprocess(
FileData(path="test/test_files/cheetah1.jpg", orig_name="cheetah1.jpg")
@ -780,7 +782,7 @@ class TestPlot:
with utils.MatplotlibBackendMananger():
output = await iface.process_api(fn_index=0, inputs=[10], state={})
assert output["data"][0]["type"] == "matplotlib"
assert output["data"][0]["plot"].startswith("data:image/png;base64")
assert output["data"][0]["plot"].startswith("data:image/webp;base64")
def test_static(self):
"""
@ -1994,7 +1996,7 @@ class TestAnnotatedImage:
"container": True,
"min_width": 160,
"scale": None,
"format": "png",
"format": "webp",
"color_map": None,
"height": None,
"width": None,

View File

@ -261,9 +261,7 @@ class TestProcessExamples:
cache_examples=True,
)
prediction = io.examples_handler.load_from_cache(0)
assert client_utils.encode_url_or_file_to_base64(prediction[0].path).startswith(
"data:image/png;base64,iVBORw0KGgoAAA"
)
assert prediction[0].path.endswith(".webp")
def test_caching_audio(self, patched_cache_folder):
io = gr.Interface(

View File

@ -135,7 +135,7 @@ class TestImagePreprocessing:
input_img.save(gradio_temp_dir / "test_test_image.png")
file_obj = processing_utils.save_pil_to_cache(
input_img, cache_dir=gradio_temp_dir
input_img, cache_dir=gradio_temp_dir, format="png"
)
output_img = Image.open(file_obj)
@ -170,16 +170,19 @@ class TestImagePreprocessing:
)
img_cp2 = Image.open(str(gradio_temp_dir / "img_color_profile_2.png"))
img_path = processing_utils.save_pil_to_cache(img, cache_dir=gradio_temp_dir)
img_path = processing_utils.save_pil_to_cache(
img, cache_dir=gradio_temp_dir, format="png"
)
img_metadata_path = processing_utils.save_pil_to_cache(
img_metadata, cache_dir=gradio_temp_dir
img_metadata, cache_dir=gradio_temp_dir, format="png"
)
img_cp1_path = processing_utils.save_pil_to_cache(
img_cp1, cache_dir=gradio_temp_dir
img_cp1, cache_dir=gradio_temp_dir, format="png"
)
img_cp2_path = processing_utils.save_pil_to_cache(
img_cp2, cache_dir=gradio_temp_dir
img_cp2, cache_dir=gradio_temp_dir, format="png"
)
assert len({img_path, img_metadata_path, img_cp1_path, img_cp2_path}) == 4
def test_resize_and_crop(self):