Running, testing, and fixing demos (#1060)

* fixing demos

* fixes
Abubakar Abid 2022-04-21 18:33:23 -07:00 committed by GitHub
parent 1dacb6758a
commit 4c1c735399
9 changed files with 29 additions and 19 deletions

Binary file not shown.


@@ -1,7 +1,7 @@
# A Blocks implementation of https://erlj.notion.site/Neural-Instrument-Cloning-from-very-few-samples-2cf41d8b630842ee8c7eb55036a1bfd6
-# Needs to be run from the demo\blocks_neural_instrument_coding folder
import datetime
+import os
import random
import gradio as gr
@@ -22,7 +22,7 @@ def reconstruct(audio):
io1 = gr.Interface(
lambda x, y, z: "sax.wav",
lambda x, y, z: os.path.join(os.path.dirname(__file__),"sax.wav"),
[
gr.Slider(label="pitch"),
gr.Slider(label="loudness"),
@@ -32,7 +32,7 @@ io1 = gr.Interface(
)
io2 = gr.Interface(
lambda x, y, z: "flute.wav",
lambda x, y, z: os.path.join(os.path.dirname(__file__),"flute.wav"),
[
gr.Slider(label="pitch"),
gr.Slider(label="loudness"),
@@ -42,7 +42,7 @@ io2 = gr.Interface(
)
io3 = gr.Interface(
lambda x, y, z: "trombone.wav",
lambda x, y, z: os.path.join(os.path.dirname(__file__),"trombone.wav"),
[
gr.Slider(label="pitch"),
gr.Slider(label="loudness"),
@@ -52,7 +52,7 @@ io3 = gr.Interface(
)
io4 = gr.Interface(
lambda x, y, z: "sax2.wav",
lambda x, y, z: os.path.join(os.path.dirname(__file__),"sax2.wav"),
[
gr.Slider(label="pitch"),
gr.Slider(label="loudness"),
@@ -82,14 +82,14 @@ with demo.clear():
Here are some **real** 16 second saxophone recordings:
"""
)
gr.Audio("sax.wav", label="Here is a real 16 second saxophone recording:")
gr.Audio("sax.wav")
gr.Audio(os.path.join(os.path.dirname(__file__),"sax.wav"), label="Here is a real 16 second saxophone recording:")
gr.Audio(os.path.join(os.path.dirname(__file__),"sax.wav"))
m(
"""\n
Here is a **generated** saxophone recordings:"""
)
a = gr.Audio("new-sax.wav")
a = gr.Audio(os.path.join(os.path.dirname(__file__),"new-sax.wav"))
gr.Button("Generate a new saxophone recording")


@@ -33,7 +33,7 @@ def interpret_gender(sentence):
demo = gr.Interface(
fn=gender_of_sentence,
-inputs=gr.Textbox(default="She went to his house to get her keys."),
+inputs=gr.Textbox(default_value="She went to his house to get her keys."),
outputs="label",
interpretation=interpret_gender,
)


@@ -14,7 +14,7 @@ def gender_of_sentence(sentence):
demo = gr.Interface(
fn=gender_of_sentence,
-inputs=gr.Textbox(default="She went to his house to get her keys."),
+inputs=gr.Textbox(default_value="She went to his house to get her keys."),
outputs="label",
interpretation="default",
)
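
Both interpretation demos need the same one-keyword rename: in the Gradio version this PR targets, the Textbox default text is passed as default_value rather than default (later releases rename it again to value). A minimal sketch of the corrected call:

import gradio as gr

# `default_value` matches the API at this commit; newer Gradio versions call it `value`.
textbox = gr.Textbox(default_value="She went to his house to get her keys.")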


@@ -1,3 +1,4 @@
+import os
import requests
import tensorflow as tf
@@ -23,7 +24,9 @@ label = gr.Label(num_top_classes=3)
gr.Interface(
fn=classify_image,
inputs=image,
outputs="image",
examples=[["images/cheetah1.jpg"], ["images/lion.jpg"]],
cache_examples=True,
).launch()
outputs=label,
examples=[
os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg"),
os.path.join(os.path.dirname(__file__), "images/lion.jpg")
]
).launch()
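
This demo gets three fixes at once: outputs now receives the configured gr.Label(num_top_classes=3) component instead of the "image" shortcut (which declared an image output even though the function returns class probabilities), the cache_examples=True flag is dropped, and the example images are located relative to the script. A rough sketch of the resulting wiring, with classify_image standing in for the demo's real TensorFlow classifier:

import os
import gradio as gr

def classify_image(img):
    # Placeholder for the demo's real model; returns class name -> confidence.
    return {"cheetah": 0.9, "lion": 0.1}

label = gr.Label(num_top_classes=3)  # shows the top three predicted classes

gr.Interface(
    fn=classify_image,
    inputs=gr.Image(),  # stand-in; the demo configures its own image input
    outputs=label,      # pass the configured component, not a string shortcut
    examples=[os.path.join(os.path.dirname(__file__), "images/cheetah1.jpg")],
).launch()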


@@ -26,8 +26,6 @@ demos_list = next(os.walk('.'))[1]
# Some demos are just too large or need to be run in a special way, so we'll just skip them
demos_list.remove('streaming_wav2vec')
demos_list.remove('blocks_neural_instrument_coding')
-demos_list.remove('reverse_audio')
-demos_list.remove('image_classifier')
demos_list.remove('flagged')
for d, demo_name in enumerate(random.sample(demos_list, args.num_demos)):


@@ -1,3 +1,5 @@
+import os
import numpy as np
import gradio as gr
@@ -8,7 +10,10 @@ def reverse_audio(audio):
return (sr, np.flipud(data))
-demo = gr.Interface(reverse_audio, "microphone", "audio", examples="audio")
+demo = gr.Interface(fn=reverse_audio,
+    inputs="microphone",
+    outputs="audio",
+    examples=os.path.join(os.path.dirname(__file__), "audio"))
if __name__ == "__main__":
demo.launch()
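
For reference, the reverse_audio demo is small enough to reconstruct in full from the hunks above; roughly (the unpacking line inside the function is not shown in the diff and is assumed):

import os
import numpy as np
import gradio as gr

def reverse_audio(audio):
    sr, data = audio              # assumed: Gradio passes microphone audio as a (sample_rate, ndarray) tuple
    return (sr, np.flipud(data))  # flip the samples to play the clip backwards

demo = gr.Interface(fn=reverse_audio,
    inputs="microphone",
    outputs="audio",
    # `examples` may point at a directory of sample files; resolving it against
    # __file__ keeps the demo runnable from any working directory.
    examples=os.path.join(os.path.dirname(__file__), "audio"))

if __name__ == "__main__":
    demo.launch()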


@@ -13,7 +13,11 @@ def sentiment_analysis(text):
return scores
-demo = gr.Interface(sentiment_analysis, "textbox", "label", interpretation="default")
+demo = gr.Interface(
+    sentiment_analysis,
+    gr.Textbox(placeholder="Enter a positive or negative sentence here..."),
+    "label",
+    interpretation="default")
if __name__ == "__main__":
demo.launch()
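
The sentiment demo keeps the same behaviour but swaps the bare "textbox" input shortcut for a configured gr.Textbox with a placeholder hint. Sketched in full, with sentiment_analysis stubbed out since the model code is not part of this hunk:

import gradio as gr

def sentiment_analysis(text):
    # Stub for the demo's real model; returns label -> confidence scores for the Label output.
    return {"POSITIVE": 0.7, "NEGATIVE": 0.3}

demo = gr.Interface(
    sentiment_analysis,
    # A configured component replaces the "textbox" shortcut so users see a hint in the box.
    gr.Textbox(placeholder="Enter a positive or negative sentence here..."),
    "label",
    interpretation="default")

if __name__ == "__main__":
    demo.launch()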


@@ -55,7 +55,7 @@
</script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
<title>Gradio</title>
<script type="module" crossorigin src="./assets/index.bbdd6a1e.js"></script>
<script type="module" crossorigin src="./assets/index.c7216004.js"></script>
<link rel="stylesheet" href="./assets/index.f12b3b15.css">
</head>