Remove gr.mix (#6184)

* remove mix

* add changeset

* fix script

---------

Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
This commit is contained in:
Abubakar Abid 2023-10-31 04:28:39 -07:00 committed by GitHub
parent 911829ac27
commit 86edc01995
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
14 changed files with 5 additions and 234 deletions

View File

@ -0,0 +1,5 @@
---
"gradio": minor
---
feat:Remove gr.mix

View File

@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_parallel"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "greeter_1 = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 1\"))\n", "greeter_2 = gr.Interface(lambda name: f\"Greetings {name}!\", inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeter 2\"))\n", "demo = gr.Parallel(greeter_1, greeter_2)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -1,8 +0,0 @@
# Gradio demo "interface_parallel": two greeter Interfaces that share the same
# textbox input, shown side by side via gr.Parallel.
import gradio as gr

# Both interfaces take the identical single textbox input (required by Parallel)
# but render their results in separately labelled output textboxes.
greeter_1 = gr.Interface(lambda name: f"Hello {name}!", inputs="textbox", outputs=gr.Textbox(label="Greeter 1"))
greeter_2 = gr.Interface(lambda name: f"Greetings {name}!", inputs="textbox", outputs=gr.Textbox(label="Greeter 2"))
# The module-level name `demo` is what gradio's reload tooling searches for.
demo = gr.Parallel(greeter_1, greeter_2)

if __name__ == "__main__":
    demo.launch()

View File

@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_parallel_load"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "generator1 = gr.load(\"huggingface/gpt2\")\n", "generator2 = gr.load(\"huggingface/gpt2-xl\")\n", "\n", "demo = gr.Parallel(generator1, generator2)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -1,9 +0,0 @@
# Gradio demo "interface_parallel_load": compare two hosted models on the same
# prompt using gr.Parallel.
import gradio as gr

# Load two Hugging Face hosted models as ready-made Interfaces.
generator1 = gr.load("huggingface/gpt2")
generator2 = gr.load("huggingface/gpt2-xl")

# Run both generators on the same input and display both outputs side by side.
demo = gr.Parallel(generator1, generator2)

if __name__ == "__main__":
    demo.launch()

View File

@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "get_name = gr.Interface(lambda name: name, inputs=\"textbox\", outputs=\"textbox\")\n", "prepend_hello = gr.Interface(lambda name: f\"Hello {name}!\", inputs=\"textbox\", outputs=\"textbox\")\n", "append_nice = gr.Interface(lambda greeting: f\"Nice to meet you!\",\n", " inputs=\"textbox\", outputs=gr.Textbox(label=\"Greeting\"))\n", "translator = gr.Interface(lambda s: \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\", gr.Textbox(), gr.Image())\n", "demo = gr.Series(get_name, translator, append_nice)\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -1,11 +0,0 @@
# Gradio demo "interface_series": chain Interfaces with gr.Series so each
# stage's output feeds the next stage's input.
import gradio as gr

get_name = gr.Interface(lambda name: name, inputs="textbox", outputs="textbox")
# NOTE(review): prepend_hello is defined but never included in the Series below.
prepend_hello = gr.Interface(lambda name: f"Hello {name}!", inputs="textbox", outputs="textbox")
# NOTE(review): this lambda ignores its `greeting` argument and always returns
# the same string — the f-string contains no placeholders.
append_nice = gr.Interface(lambda greeting: f"Nice to meet you!",
                           inputs="textbox", outputs=gr.Textbox(label="Greeting"))
# Returns a fixed image URL regardless of the input text.
translator = gr.Interface(lambda s: "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg", gr.Textbox(), gr.Image())
demo = gr.Series(get_name, translator, append_nice)

if __name__ == "__main__":
    demo.launch()

View File

@ -1 +0,0 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: interface_series_load"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import numpy as np\n", "\n", "generator = gr.load(\"huggingface/gpt2\")\n", "\n", "\n", "translator = gr.Interface(lambda s: \"https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg\", gr.Textbox(), gr.Image())\n", "\n", "demo = gr.Series(generator, translator, description=\"This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.\")\n", "\n", "if __name__ == \"__main__\":\n", " demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}

View File

@ -1,12 +0,0 @@
# Gradio demo "interface_series_load": a hosted text generator chained into a
# second interface via gr.Series.
import gradio as gr
# NOTE(review): numpy is imported but unused in this demo.
import numpy as np

generator = gr.load("huggingface/gpt2")
# Returns a fixed image URL regardless of the input text.
translator = gr.Interface(lambda s: "https://gradio-builds.s3.amazonaws.com/diffusion_image/cute_dog.jpg", gr.Textbox(), gr.Image())
# NOTE(review): the description mentions `huggingface/t5-small`, but the second
# stage is actually the fixed-image `translator` interface above — verify intent.
demo = gr.Series(generator, translator, description="This demo combines two Spaces: a text generator (`huggingface/gpt2`) and a text translator (`huggingface/t5-small`). The first Space takes a prompt as input and generates a text. The second Space takes the generated text as input and translates it into another language.")

if __name__ == "__main__":
    demo.launch()

View File

@ -78,7 +78,6 @@ from gradio.helpers import create_examples as Examples # noqa: N812
from gradio.interface import Interface, TabbedInterface, close_all
from gradio.ipython_ext import load_ipython_extension
from gradio.layouts import Accordion, Column, Group, Row, Tab, TabItem, Tabs
from gradio.mix import Parallel, Series
from gradio.oauth import OAuthProfile
from gradio.routes import Request, mount_gradio_app
from gradio.templates import (

View File

@ -38,8 +38,6 @@ def _setup_config(
f"{demo_name} = gr\\.Blocks",
f"{demo_name} = gr\\.Interface",
f"{demo_name} = gr\\.ChatInterface",
f"{demo_name} = gr\\.Series",
f"{demo_name} = gr\\.Parallel",
f"{demo_name} = gr\\.TabbedInterface",
]

View File

@ -1,129 +0,0 @@
"""
Ways to transform interfaces to produce new interfaces
"""
import asyncio
import warnings
from gradio_client.documentation import document, set_documentation_group
import gradio
set_documentation_group("mix_interface")
@document()
class Parallel(gradio.Interface):
    """
    Creates a new Interface consisting of multiple Interfaces in parallel (comparing their outputs).
    The Interfaces to put in Parallel must share the same input components (but can have different output components).
    Demos: interface_parallel, interface_parallel_load
    Guides: advanced-interface-features
    """

    def __init__(self, *interfaces: gradio.Interface, **options):
        """
        Parameters:
            interfaces: any number of Interface objects that are to be compared in parallel
            options: additional kwargs that are passed into the new Interface object to customize it
        Returns:
            an Interface object comparing the given models
        """
        # Collect every child's output components; the combined interface shows them all.
        outputs = []
        for interface in interfaces:
            if not (isinstance(interface, gradio.Interface)):
                # Best-effort: non-Interface objects get a warning, not a rejection.
                warnings.warn(
                    "Parallel requires all inputs to be of type Interface. "
                    "May not work as expected."
                )
            outputs.extend(interface.output_components)

        async def parallel_fn(*args):
            # Run all child prediction functions concurrently on the same input args.
            return_values_with_durations = await asyncio.gather(
                *[interface.call_function(0, list(args)) for interface in interfaces]
            )
            return_values = [rv["prediction"] for rv in return_values_with_durations]
            # Flatten results in child order: a single-output child contributes one
            # value; a multi-output child contributes each of its values separately.
            combined_list = []
            for interface, return_value in zip(interfaces, return_values):
                if len(interface.output_components) == 1:
                    combined_list.append(return_value)
                else:
                    combined_list.extend(return_value)
            if len(outputs) == 1:
                # A single overall output is returned bare, not wrapped in a list.
                return combined_list[0]
            return combined_list

        # Displayed function name, e.g. "fn_a | fn_b".
        parallel_fn.__name__ = " | ".join([io.__name__ for io in interfaces])
        kwargs = {
            "fn": parallel_fn,
            # All children must share the same inputs, so the first child's suffice.
            "inputs": interfaces[0].input_components,
            "outputs": outputs,
        }
        kwargs.update(options)  # caller-supplied options override the defaults above
        super().__init__(**kwargs)
@document()
class Series(gradio.Interface):
    """
    Creates a new Interface from multiple Interfaces in series (the output of one is fed as the input to the next,
    and so the input and output components must agree between the interfaces).
    Demos: interface_series, interface_series_load
    Guides: advanced-interface-features
    """

    def __init__(self, *interfaces: gradio.Interface, **options):
        """
        Parameters:
            interfaces: any number of Interface objects that are to be connected in series
            options: additional kwargs that are passed into the new Interface object to customize it
        Returns:
            an Interface object connecting the given models
        """

        async def connected_fn(*data):
            # Pipe `data` through each interface in order.
            for idx, interface in enumerate(interfaces):
                # skip preprocessing for first interface since the Series interface will include it
                if idx > 0 and not (interface.api_mode):
                    data = [
                        input_component.preprocess(data[i])
                        for i, input_component in enumerate(interface.input_components)
                    ]

                # run all of predictions sequentially
                data = (await interface.call_function(0, list(data)))["prediction"]
                if len(interface.output_components) == 1:
                    # Normalize to a list so the postprocess step below can index it.
                    data = [data]

                # skip postprocessing for final interface since the Series interface will include it
                if idx < len(interfaces) - 1 and not (interface.api_mode):
                    data = [
                        output_component.postprocess(data[i])
                        for i, output_component in enumerate(
                            interface.output_components
                        )
                    ]

            # `interface` here is the last one from the loop: unwrap a single output.
            if len(interface.output_components) == 1:  # type: ignore
                return data[0]
            return data

        for interface in interfaces:
            if not (isinstance(interface, gradio.Interface)):
                # Best-effort: non-Interface objects get a warning, not a rejection.
                warnings.warn(
                    "Series requires all inputs to be of type Interface. May "
                    "not work as expected."
                )

        # Displayed function name, e.g. "fn_a => fn_b".
        connected_fn.__name__ = " => ".join([io.__name__ for io in interfaces])
        kwargs = {
            "fn": connected_fn,
            # The chain's inputs are the first child's; its outputs the last child's.
            "inputs": interfaces[0].input_components,
            "outputs": interfaces[-1].output_components,
            "_api_mode": interfaces[0].api_mode,  # TODO: set api_mode per-interface
        }
        kwargs.update(options)  # caller-supplied options override the defaults above
        super().__init__(**kwargs)

View File

@ -26,9 +26,7 @@ def copy_all_demos(source_dir: str, dest_dir: str):
"fake_diffusion_with_gif",
"image_mod_default_image",
"image_segmentation",
"interface_parallel_load",
"interface_random_slider",
"interface_series_load",
"kitchen_sink",
"kitchen_sink_random",
"matrix_transpose",

View File

@ -1,56 +0,0 @@
import gradio as gr
from gradio import mix
from gradio.external import TooManyRequestsError
"""
WARNING: Some of these tests have an external dependency: namely that Hugging Face's Hub and Space APIs do not change, and they keep their most famous models up.
So if, e.g., Spaces is down, then these tests will not pass.
"""
class TestSeries:
    """Tests for gr.mix.Series: interfaces chained output-to-input."""

    def test_in_interface(self):
        # Composing two text interfaces should apply both transforms in order.
        first = gr.Interface(lambda x: f"{x} World", "textbox", gr.Textbox())
        second = gr.Interface(lambda x: f"{x}!", "textbox", gr.Textbox())
        chained = mix.Series(first, second)
        assert chained("Hello") == "Hello World!"

    def test_with_external(self):
        # Hosted Spaces: identity image stage piped into an image classifier.
        identity_space = gr.load("spaces/gradio-tests/image-identity-newv4-sse")
        classifier_space = gr.load("spaces/gradio-tests/image-classifier-newv4-sse")
        chained = mix.Series(identity_space, classifier_space)
        try:
            assert chained("gradio/test_data/lion.jpg")["label"] == "lion"
        except TooManyRequestsError:
            # External API rate limiting is not a test failure.
            pass
class TestParallel:
    """Tests for gr.mix.Parallel: interfaces run side by side on one input."""

    def test_in_interface(self):
        # Two single-output interfaces yield a two-element result list.
        first = gr.Interface(lambda x: f"{x} World 1!", "textbox", gr.Textbox())
        second = gr.Interface(lambda x: f"{x} World 2!", "textbox", gr.Textbox())
        combined = mix.Parallel(first, second)
        assert combined("Hello") == ["Hello World 1!", "Hello World 2!"]

    def test_multiple_return_in_interface(self):
        # A multi-output child has its outputs flattened, in order, ahead of
        # the next child's output.
        multi_output = gr.Interface(
            lambda x: (x, x + x), "textbox", [gr.Textbox(), gr.Textbox()]
        )
        single_output = gr.Interface(lambda x: f"{x} World 2!", "textbox", gr.Textbox())
        combined = mix.Parallel(multi_output, single_output)
        assert combined("Hello") == [
            "Hello",
            "HelloHello",
            "Hello World 2!",
        ]

    def test_with_external(self):
        # Hosted Spaces: one English prompt translated two ways at once.
        spanish_space = gr.load("spaces/gradio-tests/english_to_spanishv4-sse")
        german_space = gr.load("spaces/gradio-tests/english2germanv4-sse")
        combined = mix.Parallel(spanish_space, german_space)
        try:
            hello_es, hello_de = combined("Hello")
            assert "hola" in hello_es.lower()
            assert "hallo" in hello_de.lower()
        except TooManyRequestsError:
            # External API rate limiting is not a test failure.
            pass