Mirror of https://github.com/gradio-app/gradio.git (synced 2025-04-06 12:30:29 +08:00)
Remove usage of deprecated gr.inputs and gr.outputs from website (#1796)
* remove deprecated api from docs
* Remove usage of 'state' shortcut
* Back out state changes
* Back out state changes in creating_a_chatbot
* Link chatbot guide to Variable docs
This commit is contained in: parent 0dbc8bfcf3, commit 4a1879edaf
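The pattern applied in every hunk below is the same: the deprecated `gr.inputs.*` / `gr.outputs.*` classes are replaced with the equivalent top-level components. A minimal before/after sketch of the change (the `greet` function is a made-up placeholder, not code from the docs being edited):

```python
import gradio as gr

def greet(name):
    # placeholder function, for illustration only
    return "Hello " + name + "!"

# Deprecated style being removed from the docs:
# demo = gr.Interface(greet, gr.inputs.Textbox(lines=2), gr.outputs.Textbox())

# Equivalent current style used after this commit:
demo = gr.Interface(greet, gr.Textbox(lines=2), gr.Textbox())
demo.launch()
```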
@@ -17,7 +17,7 @@ def chat(message, history):
     return history, history

-chatbot = gr.Chatbot(color_map=("green", "pink")).style()
+chatbot = gr.Chatbot().style(color_map=("green", "pink"))

 demo = gr.Interface(
     chat,
     ["text", "state"],
@@ -6,7 +6,7 @@ def image_mod(image):
     return image.rotate(45)


-demo = gr.Interface(image_mod, gr.inputs.Image(type="pil"), "image",
+demo = gr.Interface(image_mod, gr.Image(type="pil"), "image",
     flagging_options=["blurry", "incorrect", "other"], examples=[
         os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg"),
         os.path.join(os.path.dirname(__file__), "images/lion.jpg"),
@@ -129,7 +129,7 @@ def inference(img):
 title = "EfficientNet-Lite4"
 description = "EfficientNet-Lite 4 is the largest variant and most accurate of the set of EfficientNet-Lite model. It is an integer-only quantized model that produces the highest accuracy of all of the EfficientNet models. It achieves 80.4% ImageNet top-1 accuracy, while still running in real-time (e.g. 30ms/image) on a Pixel 4 CPU."
 examples = [['catonnx.jpg']]
-gr.Interface(inference, gr.inputs.Image(type="filepath"), "label", title=title, description=description, examples=examples).launch()
+gr.Interface(inference, gr.Image(type="filepath"), "label", title=title, description=description, examples=examples).launch()
 ```


@@ -68,7 +68,7 @@ gr.Interface.load("huggingface/gpt2").launch();

 ```python
 gr.Interface.load("huggingface/EleutherAI/gpt-j-6B",
-    inputs=gr.inputs.Textbox(lines=5, label="Input Text") # customizes the input component
+    inputs=gr.Textbox(lines=5, label="Input Text") # customizes the input component
 ).launch()
 ```

@@ -123,7 +123,13 @@ This sets up a queue of workers to handle the predictions and return the respons

 Your function may use data that persists beyond a single function call. If the data is something accessible to all function calls and all users, you can create a variable outside the function call and access it inside the function. For example, you may load a large model outside the function and use it inside the function so that every function call does not need to reload the model.

-Another type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page load. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things: (1) Pass in an extra parameter into your function, which represents the state of the interface. (2) At the end of the function, return the updated value of the state as an extra return value (3) Add the `'state'` input and `'state'` output components when creating your `Interface`. See the chatbot example below:
+Another type of data persistence Gradio supports is session **state**, where data persists across multiple submits within a page load. However, data is *not* shared between different users of your model. To store data in a session state, you need to do three things:
+
+1. Pass in an extra parameter into your function, which represents the state of the interface.
+2. At the end of the function, return the updated value of the state as an extra return value.
+3. Add the `'state'` input and `'state'` output components when creating your `Interface`
+
+See the chatbot example below:

 $code_chatbot_demo
 $demo_chatbot_demo
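The three steps listed in the hunk above correspond to the chatbot demo that `$code_chatbot_demo` expands to. A minimal sketch of that session-state pattern (the upper-casing "response" is invented for illustration; the real demo generates actual replies):

```python
import gradio as gr

def chat(message, history):
    history = history or []                     # (1) extra parameter carrying the session state
    history.append((message, message.upper()))  # toy response: echo the message in caps
    return history, history                     # (2) return the updated state as an extra value

gr.Interface(
    chat,
    inputs=["text", "state"],                   # (3) add the 'state' input component...
    outputs=[gr.Chatbot(), "state"],            #     ...and the 'state' output component
).launch()
```

The global persistence mentioned in the same hunk (e.g. a large model) needs none of this: the object is simply created once at module level and reused inside the function on every call.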
@@ -109,7 +109,7 @@ import gradio as gr
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Slider(0, 1000, label='Seed', default=42),
+        gr.Slider(0, 1000, label='Seed', default=42),
     ],
     outputs="image",
 ).launch()
@@ -127,8 +127,8 @@ Generating 4 punks at a time is a good start, but maybe we'd like to control how
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Slider(0, 1000, label='Seed', default=42),
-        gr.inputs.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!
+        gr.Slider(0, 1000, label='Seed', default=42),
+        gr.Slider(4, 64, label='Number of Punks', step=1, default=10), # Adding another slider!
     ],
     outputs="image",
 ).launch()
@@ -215,8 +215,8 @@ def predict(seed, num_punks):
 gr.Interface(
     predict,
     inputs=[
-        gr.inputs.Slider(0, 1000, label='Seed', default=42),
-        gr.inputs.Slider(4, 64, label='Number of Punks', step=1, default=10),
+        gr.Slider(0, 1000, label='Seed', default=42),
+        gr.Slider(4, 64, label='Number of Punks', step=1, default=10),
     ],
     outputs="image",
     examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
@@ -2,7 +2,7 @@

 Related spaces: https://huggingface.co/spaces/abidlabs/chatbot-minimal, https://huggingface.co/spaces/ThomasSimonini/Chat-with-Gandalf-GPT-J6B, https://huggingface.co/spaces/gorkemgoknar/moviechatbot, https://huggingface.co/spaces/Kirili4ik/chat-with-Kirill
 Tags: NLP, TEXT, HTML
-Docs: textbox, state
+Docs: textbox, variable

 ## Introduction

@@ -76,8 +76,8 @@ Finally, we'll add one more parameter, the `examples`, which allows us to prepop
 import gradio as gr

 gr.Interface(fn=predict,
-             inputs=gr.inputs.Image(type="pil"),
-             outputs=gr.outputs.Label(num_top_classes=3),
+             inputs=gr.Image(type="pil"),
+             outputs=gr.Label(num_top_classes=3),
              examples=["lion.jpg", "cheetah.jpg"]).launch()
 ```

@@ -74,8 +74,8 @@ Finally, we'll add one more parameter, the `examples`, which allows us to prepop
 import gradio as gr

 gr.Interface(fn=classify_image,
-             inputs=gr.inputs.Image(shape=(224, 224)),
-             outputs=gr.outputs.Label(num_top_classes=3),
+             inputs=gr.Image(shape=(224, 224)),
+             outputs=gr.Label(num_top_classes=3),
              examples=["banana.jpg", "car.jpg"]).launch()
 ```

@@ -2,7 +2,7 @@

 Related spaces: https://huggingface.co/spaces/abidlabs/streaming-asr-paused, https://huggingface.co/spaces/abidlabs/full-context-asr
 Tags: ASR, SPEECH, STREAMING
-Docs: audio, state, textbox
+Docs: audio, variable, textbox

 ## Introduction

@@ -54,7 +54,7 @@ def transcribe(audio):

 gr.Interface(
     fn=transcribe,
-    inputs=gr.inputs.Audio(source="microphone", type="filepath"),
+    inputs=gr.Audio(source="microphone", type="filepath"),
     outputs="text").launch()
 ```

@@ -76,23 +76,23 @@ When adding state to a Gradio demo, you need to do a total of 3 things:
 * Add a `state` parameter to the function
 * Return the updated `state` at the end of the function
-* Add the `"state"` components to the `inputs` and `outputs` in `Interface`
+* Add the `"state"` components to the `inputs` and `outputs` in `Interface`

 Here's what the code looks like:

 ```python
 import gradio as gr

 def transcribe(audio, state=""):
     text = p(audio)["text"]
     state += text + " "
     return state, state

 # Set the starting state to an empty string
 gr.Interface(
     fn=transcribe,
     inputs=[
-        gr.inputs.Audio(source="microphone", type="filepath"),
-        "state"
+        gr.Audio(source="microphone", type="filepath", streaming=True),
+        "state"
     ],
     outputs=[
         "textbox",
@@ -126,7 +126,7 @@ def transcribe(audio, state=""):
 gr.Interface(
     fn=transcribe,
     inputs=[
-        gr.inputs.Audio(source="microphone", type="filepath"),
+        gr.Audio(source="microphone", type="filepath", streaming=True),
         "state"
     ],
     outputs=[
@@ -211,8 +211,8 @@ import gradio as gr
 gr.Interface(
     fn=transcribe,
     inputs=[
-        gr.inputs.Audio(source="microphone", type="numpy"),
-        "state"
+        gr.Audio(source="microphone", type="numpy"),
+        "state"
     ],
     outputs= [
         "text",
@@ -55,7 +55,7 @@ def calculator(num1, operation, num2):

 iface = gr.Interface(
     calculator,
-    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
+    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
     "number",
     allow_flagging="manual"
 )
@@ -104,7 +104,7 @@ If we go back to the calculator example, the following code will create the inte
 ```python
 iface = gr.Interface(
     calculator,
-    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
+    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
     "number",
     allow_flagging="manual",
     flagging_options=["wrong sign", "off by one", "other"]
@@ -137,7 +137,7 @@ hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

 iface = gr.Interface(
     calculator,
-    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
+    ["number", gr.Radio(["add", "subtract", "multiply", "divide"]), "number"],
     "number",
     allow_flagging="manual",
     flagging_options=["wrong sign", "off by one", "other"],
@@ -102,7 +102,7 @@ import gradio as gr

 gr.Interface.load(
     "spaces/eugenesiow/remove-bg",
-    inputs=[gr.inputs.Image(label="Input Image", source="webcam")]
+    inputs=[gr.Image(label="Input Image", source="webcam")]
 ).launch()
 ```
