Mirror of https://github.com/gradio-app/gradio.git
Synced 2025-03-25 12:10:31 +08:00

Commit c15885f0cc: merge master

demo/Image3D/files/Bunny.obj (new file, 7474 lines)
File diff suppressed because it is too large.

demo/Image3D/files/Duck.glb (new binary file)
Binary file not shown.

demo/Image3D/files/Fox.gltf (new file, 1777 lines)
File diff suppressed because one or more lines are too long.

demo/Image3D/files/source.txt (new file, 6 lines)

@@ -0,0 +1,6 @@
Stanford Bunny:
https://graphics.stanford.edu/data/3Dscanrep/
https://graphics.stanford.edu/~mdfisher/Data/Meshes/bunny.obj

Duck & Fox:
https://github.com/KhronosGroup/glTF-Sample-Models

demo/Image3D/run.py (new file, 16 lines)

@@ -0,0 +1,16 @@
```python
import gradio as gr
import time

def load_mesh(mesh_file_name):
    time.sleep(2)
    return mesh_file_name

iface = gr.Interface(
    fn=load_mesh,
    inputs=gr.inputs.Image3D(),
    outputs=gr.outputs.Image3D(clear_color=[0.8, 0.2, 0.2, 1.0]),
    examples=[["files/Bunny.obj"], ["files/Duck.glb"]]
)

if __name__ == "__main__":
    iface.launch(cache_examples=True)
```
@ -1,2 +1,3 @@
|
||||
numpy
|
||||
matplotlib
|
||||
matplotlib
|
||||
bokeh
|
||||
|

demo/streaming_stt/.gitignore (new file, 2 lines, vendored)

@@ -0,0 +1,2 @@
*.pbmm
*.scorer

demo/streaming_stt/requirements.txt (new file, 1 line)

@@ -0,0 +1 @@
deepspeech==0.8.2

demo/streaming_stt/run.py (new file, 42 lines)

@@ -0,0 +1,42 @@
```python
from deepspeech import Model
import gradio as gr
import numpy as np

model_file_path = "deepspeech-0.8.2-models.pbmm"
lm_file_path = "deepspeech-0.8.2-models.scorer"
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18

model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)


def reformat_freq(sr, y):
    if sr not in (
        48000,
        16000,
    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)
        raise ValueError("Unsupported rate", sr)
    if sr == 48000:
        y = (
            ((y / max(np.max(y), 1)) * 32767)
            .reshape((-1, 3))
            .mean(axis=1)
            .astype("int16")
        )
        sr = 16000
    return sr, y


def transcribe(speech, stream):
    _, y = reformat_freq(*speech)
    if stream is None:
        stream = model.createStream()
    stream.feedAudioContent(y)
    text = stream.intermediateDecode()
    return text, stream


gr.Interface(transcribe, ["microphone", "state"], ["text", "state"], live=True).launch()
```

demo/streaming_stt/setup.sh (new file, 3 lines)

@@ -0,0 +1,3 @@
```bash
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer
apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg
```

demo/streaming_wav2vec/requirements.txt (new file, 1 line)

@@ -0,0 +1 @@
deepspeech==0.8.2

demo/streaming_wav2vec/run.py (new file, 43 lines)

@@ -0,0 +1,43 @@
```python
from deepspeech import Model
import gradio as gr
import scipy.io.wavfile
import numpy as np

model_file_path = "deepspeech-0.8.2-models.pbmm"
lm_file_path = "deepspeech-0.8.2-models.scorer"
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18

model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)


def reformat_freq(sr, y):
    if sr not in (
        48000,
        16000,
    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)
        raise ValueError("Unsupported rate", sr)
    if sr == 48000:
        y = (
            ((y / max(np.max(y), 1)) * 32767)
            .reshape((-1, 3))
            .mean(axis=1)
            .astype("int16")
        )
        sr = 16000
    return sr, y


def transcribe(speech, stream):
    _, y = reformat_freq(*speech)
    if stream is None:
        stream = model.createStream()
    stream.feedAudioContent(y)
    text = stream.intermediateDecode()
    return text, stream


gr.Interface(transcribe, ["microphone", "state"], ["text", "state"], live=True).launch()
```

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: gradio
-Version: 2.8.13
+Version: 2.9.4
 Summary: Python library for easily interacting with trained machine learning models
 Home-page: https://github.com/gradio-app/gradio-UI
 Author: Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir

@@ -14,3 +14,4 @@ python-multipart
 pydub
 requests
 uvicorn
+Jinja2

@@ -62,6 +62,7 @@ class PredictBody(BaseModel):
     data: List[Any]
     state: Optional[Any]
     fn_index: Optional[int]
+    cleared: Optional[bool]
 
 
 class FlagData(BaseModel):

@@ -304,7 +305,6 @@ def safe_join(directory: str, path: str) -> Optional[str]:
         or filename.startswith("../")
     ):
         return None
 
     return posixpath.join(directory, filename)
 
-

@@ -55,8 +55,8 @@
 	</script>
 	<script src="https://cdnjs.cloudflare.com/ajax/libs/iframe-resizer/4.3.1/iframeResizer.contentWindow.min.js"></script>
 	<title>Gradio</title>
-	<script type="module" crossorigin src="./assets/index.225863aa.js"></script>
-	<link rel="stylesheet" href="./assets/index.39bf42f9.css">
+	<script type="module" crossorigin src="./assets/index.5efcf83d.js"></script>
+	<link rel="stylesheet" href="./assets/index.5fac4bf7.css">
 </head>
 
 <body

@@ -1 +1,5 @@
+<<<<<<< HEAD
 2.8.13
+=======
+2.9.4
+>>>>>>> main

@@ -9,7 +9,7 @@ When an interface is shared, it is usually accompanied with some form of explana
 
 For example, take a look at this fun chatbot interface below. It has a title, description, image as well as a link in the bottom.
 
-<iframe src="https://hf.space/gradioiframe/aliabd/rick-and-morty/+" frameBorder="0" height="875" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/aliabd/rick-and-morty/+" frameBorder="0" height="875" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 ## The parameters in `Interface`
 

@@ -9,7 +9,7 @@ How well can an algorithm guess what you're drawing? A few years ago, Google rel
 
 Such models are perfect to use with Gradio's *sketchpad* input, so in this tutorial we will build a Pictionary web application using Gradio. We will be able to build the whole web application in Python, and it will look like this (try drawing something!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 Let's get started!
 

@@ -97,7 +97,7 @@ gr.Interface(fn=predict,
 
 This produces the following interface, which you can try right here in your browser (try drawing something, like a "snake" or a "laptop"):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/draw2/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 ----------
 

guides/create_your_own_friends_with_a_gan.md (new file, 226 lines)

@@ -0,0 +1,226 @@
# Create Your Own Friends with a GAN

related_spaces: https://huggingface.co/spaces/NimaBoscarino/cryptopunks, https://huggingface.co/spaces/nateraw/cryptopunks-generator
tags: GAN, IMAGE, HUB

Contributed by <a href="https://huggingface.co/NimaBoscarino">Nima Boscarino</a> and <a href="https://huggingface.co/nateraw">Nate Raw</a>

## Introduction

It seems that cryptocurrencies, [NFTs](https://www.nytimes.com/interactive/2022/03/18/technology/nft-guide.html), and the web3 movement are all the rage these days! Digital assets are being listed on marketplaces for astounding amounts of money, and just about every celebrity is debuting their own NFT collection. While your crypto assets [may be taxable, such as in Canada](https://www.canada.ca/en/revenue-agency/programs/about-canada-revenue-agency-cra/compliance/digital-currency/cryptocurrency-guide.html), today we'll explore some fun and tax-free ways to generate your own assortment of procedurally generated [CryptoPunks](https://www.larvalabs.com/cryptopunks).

Generative Adversarial Networks, often known just as *GANs*, are a specific class of deep-learning models that are designed to learn from an input dataset to create (*generate!*) new material that is convincingly similar to elements of the original training set. Famously, the website [thispersondoesnotexist.com](https://thispersondoesnotexist.com/) went viral with lifelike, yet synthetic, images of people generated with a model called StyleGAN2. GANs have gained traction in the machine learning world, and are now being used to generate all sorts of images, text, and even [music](https://salu133445.github.io/musegan/)!

Today we'll briefly look at the high-level intuition behind GANs, and then we'll build a small demo around a pre-trained GAN to see what all the fuss is about. Here's a peek at what we're going to be putting together:

<iframe src="https://hf.space/embed/NimaBoscarino/cryptopunks/+" frameBorder="0" height="590" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

### Prerequisites

Make sure you have the `gradio` Python package already [installed](/getting_started). To use the pretrained model, also install `torch` and `torchvision`.

## GANs: a very brief introduction

Originally proposed in [Goodfellow et al. 2014](https://arxiv.org/abs/1406.2661), GANs are made up of neural networks which compete with the intention of outsmarting each other. One network, known as the *generator*, is responsible for generating images. The other network, the *discriminator*, receives one image at a time from the generator along with a **real** image from the training data set. The discriminator then has to guess: which image is the fake?

The generator is constantly training to create images which are trickier for the discriminator to identify, while the discriminator raises the bar for the generator every time it correctly detects a fake. As the networks engage in this competitive (*adversarial!*) relationship, the images that get generated improve to the point where they become indistinguishable to human eyes!
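
To make that dynamic concrete, here is a minimal sketch of a single training step for this two-player game. It is illustrative only: `netG`, `netD`, the optimizers, and a discriminator that outputs one probability per image are all assumptions, not part of the demo we build below.

```python
import torch
from torch import nn

criterion = nn.BCELoss()

def train_step(netG, netD, optG, optD, real_images, nz=100):
    # Assumes netD(x) returns a probability per image, shape (batch,).
    b = real_images.size(0)
    fake_images = netG(torch.randn(b, nz, 1, 1))

    # Discriminator step: push real images toward label 1, fakes toward 0.
    optD.zero_grad()
    loss_d = criterion(netD(real_images), torch.ones(b)) + criterion(
        netD(fake_images.detach()), torch.zeros(b)
    )
    loss_d.backward()
    optD.step()

    # Generator step: try to make the discriminator output 1 for the fakes.
    optG.zero_grad()
    loss_g = criterion(netD(fake_images), torch.ones(b))
    loss_g.backward()
    optG.step()
    return loss_d.item(), loss_g.item()
```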

For a more in-depth look at GANs, you can take a look at [this excellent post on Analytics Vidhya](https://www.analyticsvidhya.com/blog/2021/06/a-detailed-explanation-of-gan-with-implementation-using-tensorflow-and-keras/) or this [PyTorch tutorial](https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html). For now, though, we'll dive into a demo!

## Step 1 — Create the Generator model

To generate new images with a GAN, you only need the generator model. There are many different architectures that the generator could use, but for this demo we'll use a pretrained GAN generator model with the following architecture:

```python
from torch import nn

class Generator(nn.Module):
    # Refer to the link below for explanations about nc, nz, and ngf
    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs
    def __init__(self, nc=4, nz=100, ngf=64):
        super(Generator, self).__init__()
        self.network = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, input):
        output = self.network(input)
        return output
```

We're taking the generator from [this repo by @teddykoker](https://github.com/teddykoker/cryptopunks-gan/blob/main/train.py#L90), where you can also see the original discriminator model structure.

After instantiating the model, we'll load in the weights from the Hugging Face Hub, stored at [nateraw/cryptopunks-gan](https://huggingface.co/nateraw/cryptopunks-gan):

```python
from huggingface_hub import hf_hub_download
import torch

model = Generator()
weights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')
model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu')))  # Use 'cuda' if you have a GPU available
```

## Step 2 — Defining a `predict` function

The `predict` function is the key to making Gradio work! Whatever inputs we choose through the Gradio interface will get passed through our `predict` function, which should operate on the inputs and generate outputs that we can display with Gradio output components. For GANs it's common to pass random noise into our model as the input, so we'll generate a tensor of random numbers and pass that through the model. We can then use `torchvision`'s `save_image` function to save the output of the model as a `png` file, and return the file name:

```python
from torchvision.utils import save_image

def predict(seed):
    num_punks = 4
    torch.manual_seed(seed)
    z = torch.randn(num_punks, 100, 1, 1)
    punks = model(z)
    save_image(punks, "punks.png", normalize=True)
    return 'punks.png'
```

We're giving our `predict` function a `seed` parameter, so that we can fix the random tensor generation with a seed. We'll then be able to reproduce punks if we want to see them again by passing in the same seed.

*Note!* Our model needs an input tensor of dimensions 100x1x1 to do a single inference, or (BatchSize)x100x1x1 for generating a batch of images. In this demo we'll start by generating 4 punks at a time.
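
As a quick, illustrative sanity check of those dimensions (the output size follows from the `ConvTranspose2d` stack in Step 1, which upsamples 1x1 noise to 24x24 images):

```python
z = torch.randn(4, 100, 1, 1)  # a batch of 4 noise vectors
punks = model(z)
print(punks.shape)  # torch.Size([4, 4, 24, 24]): four 4-channel (RGBA) 24x24 punks
```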

## Step 3 — Creating a Gradio interface

At this point you can even run the code you have with `predict(<SOME_NUMBER>)`, and you'll find your freshly generated punks in your file system at `./punks.png`. To make a truly interactive demo, though, we'll build out a simple interface with Gradio. Our goals here are to:

* Set a slider input so users can choose the "seed" value
* Use an image component for our output to showcase the generated punks
* Use our `predict()` to take the seed and generate the images

With `gr.Interface()`, we can define all of that with a single function call:

```python
import gradio as gr

gr.Interface(
    predict,
    inputs=[
        gr.inputs.Slider(label='Seed', minimum=0, maximum=1000, default=42),
    ],
    outputs="image",
).launch()
```

Launching the interface should present you with something like this:

<iframe src="https://hf.space/embed/NimaBoscarino/cryptopunks-1/+" frameBorder="0" height="590" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

## Step 4 — Even more punks!

Generating 4 punks at a time is a good start, but maybe we'd like to control how many we want to make each time. Adding more inputs to our Gradio interface is as simple as adding another item to the `inputs` list that we pass to `gr.Interface`:

```python
gr.Interface(
    predict,
    inputs=[
        gr.inputs.Slider(label='Seed', minimum=0, maximum=1000, default=42),
        gr.inputs.Slider(label='Number of Punks', minimum=4, maximum=64, step=1, default=10),  # Adding another slider!
    ],
    outputs="image",
).launch()
```

The new input will be passed to our `predict()` function, so we have to make some changes to that function to accept a new parameter:

```python
def predict(seed, num_punks):
    torch.manual_seed(seed)
    z = torch.randn(num_punks, 100, 1, 1)
    punks = model(z)
    save_image(punks, "punks.png", normalize=True)
    return 'punks.png'
```

When you relaunch your interface, you should see a second slider that'll let you control the number of punks!

## Step 5 — Polishing it up

Your Gradio app is pretty much good to go, but you can add a few extra things to really make it ready for the spotlight ✨

We can add some examples that users can easily try out by adding this to the `gr.Interface`:

```python
gr.Interface(
    # ...
    # keep everything as it is, and then add
    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
).launch(cache_examples=True)  # cache_examples is optional
```

The `examples` parameter takes a list of lists, where each item in the sublists is ordered in the same order that we've listed the `inputs`. So in our case, `[seed, num_punks]`. Give it a try!

You can also try adding a `title`, `description`, and `article` to the `gr.Interface`. Each of those parameters accepts a string, so try it out and see what happens 👀 `article` will also accept HTML, as [explored in a previous guide](./adding_rich_descriptions_to_your_demo)!

When you're all done, you may end up with something like this:

<iframe src="https://hf.space/embed/NimaBoscarino/cryptopunks/+" frameBorder="0" height="590" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

For reference, here is our full code:

```python
import torch
from torch import nn
from huggingface_hub import hf_hub_download
from torchvision.utils import save_image
import gradio as gr

class Generator(nn.Module):
    # Refer to the link below for explanations about nc, nz, and ngf
    # https://pytorch.org/tutorials/beginner/dcgan_faces_tutorial.html#inputs
    def __init__(self, nc=4, nz=100, ngf=64):
        super(Generator, self).__init__()
        self.network = nn.Sequential(
            nn.ConvTranspose2d(nz, ngf * 4, 3, 1, 0, bias=False),
            nn.BatchNorm2d(ngf * 4),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 4, ngf * 2, 3, 2, 1, bias=False),
            nn.BatchNorm2d(ngf * 2),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 0, bias=False),
            nn.BatchNorm2d(ngf),
            nn.ReLU(True),
            nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, input):
        output = self.network(input)
        return output

model = Generator()
weights_path = hf_hub_download('nateraw/cryptopunks-gan', 'generator.pth')
model.load_state_dict(torch.load(weights_path, map_location=torch.device('cpu')))  # Use 'cuda' if you have a GPU available

def predict(seed, num_punks):
    torch.manual_seed(seed)
    z = torch.randn(num_punks, 100, 1, 1)
    punks = model(z)
    save_image(punks, "punks.png", normalize=True)
    return 'punks.png'

gr.Interface(
    predict,
    inputs=[
        gr.inputs.Slider(label='Seed', minimum=0, maximum=1000, default=42),
        gr.inputs.Slider(label='Number of Punks', minimum=4, maximum=64, step=1, default=10),
    ],
    outputs="image",
    examples=[[123, 15], [42, 29], [456, 8], [1337, 35]],
).launch(cache_examples=True)
```

----------

Congratulations! You've built out your very own GAN-powered CryptoPunks generator, with a fancy Gradio interface that makes it easy for anyone to use. Now you can [scour the Hub for more GANs](https://huggingface.co/models?other=gan) (or train your own) and continue making even more awesome demos 🤗

@@ -11,7 +11,7 @@ Using `gradio`, you can easily build a demo of your chatbot model and share that
 
 This tutorial will show how to take a pretrained chatbot model and deploy it with a Gradio interface in 4 steps. The live chatbot interface that we create will look something like this (try it!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/chatbot-stylized/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 Chatbots are *stateful*, meaning that the model's prediction can change depending on how the user has previously interacted with the model. So, in this tutorial, we will also cover how to use **state** with Gradio demos.
 

@@ -86,7 +86,7 @@ gr.Interface(fn=predict,
 
 This produces the following interface, which you can try right here in your browser (try typing in some simple greetings like "Hi!" to get started):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/chatbot-minimal/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/chatbot-minimal/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 
 ----------

@@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
 
 Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 
 Let's get started!

@@ -82,7 +82,7 @@ gr.Interface(fn=predict,
 
 This produces the following interface, which you can try right here in your browser (try uploading your own examples!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/pytorch-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 ----------
 

@@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
 
 Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in Python, and it will look like this (try one of the examples!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 
 Let's get started!

@@ -80,7 +80,7 @@ gr.Interface(fn=classify_image,
 
 This produces the following interface, which you can try right here in your browser (try uploading your own examples!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/keras-image-classifier/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 ----------
 

@@ -9,7 +9,7 @@ Image classification is a central task in computer vision. Building better class
 
 State-of-the-art image classifiers are based on the *transformers* architectures, originally popularized for NLP tasks. Such architectures are typically called vision transformers (ViT). Such models are perfect to use with Gradio's *image* input component, so in this tutorial we will build a web demo to classify images using Gradio. We will be able to build the whole web application in a **single line of Python**, and it will look like this (try one of the examples!):
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 
 Let's get started!

@@ -47,7 +47,7 @@ Notice that we have added one more parameter, the `examples`, which allows us to
 
 This produces the following interface, which you can try right here in your browser. When you input an image, it is automatically preprocessed and sent to the Hugging Face Hub API, where it is passed through the model and returned as a human-interpretable prediction. Try uploading your own image!
 
-<iframe src="https://hf.space/gradioiframe/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
+<iframe src="https://hf.space/embed/abidlabs/vision-transformer/+" frameBorder="0" height="660" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
 
 ----------
 

guides/real_time_speech_recognition.md (new file, 232 lines)

@@ -0,0 +1,232 @@
# Real Time Speech Recognition

related_spaces: https://huggingface.co/spaces/abidlabs/streaming-asr-paused, https://huggingface.co/spaces/abidlabs/full-context-asr
tags: ASR, SPEECH, STREAMING

## Introduction

Automatic speech recognition (ASR), the conversion of spoken speech to text, is a very important and thriving area of machine learning. ASR algorithms run on practically every smartphone, and are becoming increasingly embedded in professional workflows, such as digital assistants for nurses and doctors. Because ASR algorithms are designed to be used directly by customers and end users, it is important to validate that they are behaving as expected when confronted with a wide variety of speech patterns (different accents, pitches, and background audio conditions).

Using `gradio`, you can easily build a demo of your ASR model and share that with a testing team, or test it yourself by speaking through the microphone on your device.

This tutorial will show how to take a pretrained speech-to-text model and deploy it with a Gradio interface. We will start with a ***full-context*** model, in which the user speaks the entire audio before the prediction runs. Then we will adapt the demo to make it ***streaming***, meaning that the audio model will convert speech as you speak. The streaming demo that we create will look something like this (try it below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused)!):

<iframe src="https://hf.space/gradioiframe/abidlabs/streaming-asr-paused/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

Real-time ASR is inherently *stateful*, meaning that the model's predictions change depending on what words the user previously spoke. So, in this tutorial, we will also cover how to use **state** with Gradio demos.

### Prerequisites

Make sure you have the `gradio` Python package already [installed](/getting_started). You will also need a pretrained speech recognition model. In this tutorial, we will build demos from 2 ASR libraries:

* Transformers (for this, `pip install transformers` and `pip install torch`)
* DeepSpeech (`pip install deepspeech==0.8.2`)

Make sure you have at least one of these installed so that you can follow along with the tutorial. You will also need `ffmpeg` [installed on your system](https://www.ffmpeg.org/download.html), if you do not already have it, to process files from the microphone.

## Step 1 — Setting up the Transformers ASR Model

First, you will need an ASR model, either one that you have trained yourself or a pretrained one that you download. In this tutorial, we will start with a pretrained ASR model from the Hugging Face model hub, `Wav2Vec2`.

Here is the code to load `Wav2Vec2` from Hugging Face `transformers`.

```python
from transformers import pipeline

p = pipeline("automatic-speech-recognition")
```

That's it! By default, the automatic speech recognition model pipeline loads Facebook's `facebook/wav2vec2-base-960h` model.
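
As a quick illustration of how you would call this pipeline directly (the file name here is hypothetical):

```python
# The pipeline accepts a path to an audio file and returns a dict
# whose "text" key holds the transcription.
result = p("sample.wav")
print(result["text"])
```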

## Step 2 — Creating a Full-Context ASR Demo with Transformers

We will start by creating a *full-context* ASR demo, in which the user speaks the full audio before using the ASR model to run inference. This is very easy with Gradio -- we simply create a function around the `pipeline` object above.

We will use `gradio`'s built-in `Audio` component, configured to take input from the user's microphone and return a filepath for the recorded audio. The output component will be a plain `Textbox`.

```python
import gradio as gr

def transcribe(audio):
    text = p(audio)["text"]
    return text

gr.Interface(
    fn=transcribe,
    inputs=gr.inputs.Audio(source="microphone", type="filepath"),
    outputs="text").launch()
```

So what's happening here? The `transcribe` function takes a single parameter, `audio`, which is a filepath to the audio file that the user has recorded. The `pipeline` object expects a filepath and converts it to text, which is returned to the frontend and displayed in a textbox.

Let's see it in action! (Record a short audio clip and then click submit, or [open in a new tab](https://huggingface.co/spaces/abidlabs/full-context-asr)):

<iframe src="https://hf.space/gradioiframe/abidlabs/full-context-asr/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

## Step 3 — Creating a Streaming ASR Demo with Transformers

Ok great! We've built an ASR model that works well for short audio clips. However, if you are recording longer audio clips, you probably want a *streaming* interface, one that transcribes audio as the user speaks instead of just all-at-once at the end.

The good news is that it's not too difficult to adapt the demo we just made to make it streaming, using the same `Wav2Vec2` model.

The biggest change is that we must now introduce a `state` parameter, which holds the text that has been *transcribed so far*. This allows us to process only the latest chunk of audio and simply append its transcription to the text we previously transcribed.

When adding state to a Gradio demo, you need to do a total of 3 things:

* Add a `state` parameter to the function
* Return the updated `state` at the end of the function
* Add the `"state"` components to the `inputs` and `outputs` in `Interface`

Here's what the code looks like:

```python
import gradio as gr

def transcribe(audio, state=""):
    text = p(audio)["text"]
    state += text + " "
    return state, state

gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath"),
        "state"
    ],
    outputs=[
        "textbox",
        "state"
    ],
    live=True).launch()
```

Notice that we've also made one other change, which is that we've set `live=True`. This keeps the Gradio interface running constantly, so it automatically transcribes audio without the user having to repeatedly hit the submit button.

Let's see how it does (try below or [in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr))!

<iframe src="https://hf.space/gradioiframe/abidlabs/streaming-asr/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

One thing that you may notice is that the transcription quality has dropped: the chunks of audio are so small that they lack the context needed to be transcribed properly. A "hacky" fix for this is to simply increase the runtime of the `transcribe()` function so that longer audio chunks are processed. We can do this by adding a `time.sleep()` inside the function, as shown below (we'll see a proper fix next).

```python
from transformers import pipeline
import gradio as gr
import time

p = pipeline("automatic-speech-recognition")

def transcribe(audio, state=""):
    time.sleep(2)
    text = p(audio)["text"]
    state += text + " "
    return state, state

gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="filepath"),
        "state"
    ],
    outputs=[
        "textbox",
        "state"
    ],
    live=True).launch()
```

Try the demo below to see the difference (or [open in a new tab](https://huggingface.co/spaces/abidlabs/streaming-asr-paused))!

<iframe src="https://hf.space/gradioiframe/abidlabs/streaming-asr-paused/+" frameBorder="0" height="350" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>

## Step 4 — Creating a Streaming ASR Demo with DeepSpeech

You're not restricted to ASR models from the `transformers` library -- you can use your own models or models from other libraries. The `DeepSpeech` library contains models that are specifically designed to handle streaming audio data. These models perform really well with streaming data as they are able to account for previous chunks of audio data when making predictions.

Going through the DeepSpeech library is beyond the scope of this Guide (check out their [excellent documentation here](https://deepspeech.readthedocs.io/en/r0.9/)), but you can use Gradio very similarly with a DeepSpeech ASR model as with a Transformers ASR model.

Here's a complete example (on Linux):

First install the DeepSpeech library and download the pretrained models from the terminal:

```bash
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.pbmm
wget https://github.com/mozilla/DeepSpeech/releases/download/v0.8.2/deepspeech-0.8.2-models.scorer
apt install libasound2-dev portaudio19-dev libportaudio2 libportaudiocpp0 ffmpeg
pip install deepspeech==0.8.2
```

Then, create a similar `transcribe()` function as before:

```python
from deepspeech import Model
import numpy as np

model_file_path = "deepspeech-0.8.2-models.pbmm"
lm_file_path = "deepspeech-0.8.2-models.scorer"
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18

model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)


def reformat_freq(sr, y):
    if sr not in (
        48000,
        16000,
    ):  # Deepspeech only supports 16k, (we convert 48k -> 16k)
        raise ValueError("Unsupported rate", sr)
    if sr == 48000:
        y = (
            ((y / max(np.max(y), 1)) * 32767)
            .reshape((-1, 3))
            .mean(axis=1)
            .astype("int16")
        )
        sr = 16000
    return sr, y


def transcribe(speech, stream):
    _, y = reformat_freq(*speech)
    if stream is None:
        stream = model.createStream()
    stream.feedAudioContent(y)
    text = stream.intermediateDecode()
    return text, stream
```
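
As a quick, illustrative check of the resampling helper above (assuming a one-second 48 kHz int16 buffer):

```python
import numpy as np

# Hypothetical usage of reformat_freq from the snippet above.
sr, y = 48000, np.random.randint(-32768, 32767, size=48000, dtype=np.int16)
new_sr, y16 = reformat_freq(sr, y)
assert new_sr == 16000
assert len(y16) == len(y) // 3  # 48 kHz -> 16 kHz by averaging groups of 3 samples
```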

Then, create a Gradio Interface as before (the only difference being that the microphone `Audio` component's `type` should be `numpy` instead of `filepath`, so that raw sample data compatible with the DeepSpeech models is passed in):

```python
import gradio as gr

gr.Interface(
    fn=transcribe,
    inputs=[
        gr.inputs.Audio(source="microphone", type="numpy"),
        "state"
    ],
    outputs=[
        "text",
        "state"
    ],
    live=True).launch()
```

Running all of this should allow you to deploy your real-time ASR model with a nice GUI. Try it out and see how well it works for you.

--------------------------------------------

And you're done! That's all the code you need to build a web-based GUI for your ASR model.

Fun tip: you can share your ASR model instantly with others simply by setting `share=True` in `launch()`.
@ -63,7 +63,7 @@ iface = gr.Interface(
|
||||
iface.launch()
|
||||
```
|
||||
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flag-basic/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/aliabd/calculator-flag-basic/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
|
||||
When you click the flag button above, the directory where the interface was launched will include a new flagged subfolder, with a csv file inside it. This csv file includes all the data that was flagged.
|
||||
@ -112,7 +112,7 @@ iface = gr.Interface(
|
||||
|
||||
iface.launch()
|
||||
```
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-options/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
<iframe src="https://hf.space/embed/aliabd/calculator-flagging-options/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
|
||||
|
||||
When users click the flag button, the csv file will now include a column indicating the selected option.
|
||||
|
||||
@ -146,7 +146,7 @@ iface = gr.Interface(
|
||||
|
||||
iface.launch()
|
||||
```
|
||||
<iframe src="https://hf.space/gradioiframe/aliabd/calculator-flagging-crowdsourced/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/aliabd/calculator-flagging-crowdsourced/+" frameBorder="0" height="500" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
You can now see all the examples flagged above in this [public HF dataset](https://huggingface.co/datasets/aliabd/crowdsourced-calculator-demo/blob/main/data.csv).
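Gradio's `HuggingFaceDatasetSaver` flagging callback is what makes this possible: it writes flagged samples to a Hub dataset instead of a local CSV. A sketch, assuming that callback is available in the installed gradio version (the token variable and dataset name below are placeholders):

```python
import gradio as gr

HF_TOKEN = "hf_..."  # placeholder: a write-access token for your Hugging Face account

# flagged samples are appended to a dataset repo on the Hub
hf_writer = gr.HuggingFaceDatasetSaver(HF_TOKEN, "crowdsourced-calculator-demo")

iface = gr.Interface(
    calculator,  # same hypothetical function as in the sketches above
    ["number", gr.inputs.Radio(["add", "subtract", "multiply", "divide"]), "number"],
    "number",
    flagging_callback=hf_writer,
)
iface.launch()
```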
@ -1,9 +1,9 @@
# Using Hugging Face Integrations
related_spaces: https://huggingface.co/spaces/osanseviero/helsinki_translation_en_es, https://huggingface.co/spaces/osanseviero/remove-bg-webcam, https://huggingface.co/spaces/mrm8488/GPT-J-6B, https://huggingface.co/spaces/akhaliq/T0pp, https://huggingface.co/spaces/osanseviero/mix_match_gradio
related_spaces: https://huggingface.co/spaces/farukozderim/Model-Comparator-Space-Builder, https://huggingface.co/spaces/osanseviero/helsinki_translation_en_es, https://huggingface.co/spaces/osanseviero/remove-bg-webcam, https://huggingface.co/spaces/mrm8488/GPT-J-6B, https://huggingface.co/spaces/akhaliq/T0pp, https://huggingface.co/spaces/osanseviero/mix_match_gradio
tags: HUB, SPACES, EMBED
Contributed by <a href="https://huggingface.co/osanseviero">Omar Sanseviero</a> 🦙
Contributed by <a href="https://huggingface.co/osanseviero">Omar Sanseviero</a> 🦙 and <a href="https://huggingface.co/farukozderim">Ömer Faruk Özdemir</a>
## Introduction
@ -39,7 +39,7 @@ iface.launch()
The previous code produces the following interface, which you can try right here in your browser:
<iframe src="https://hf.space/gradioiframe/osanseviero/helsinki_translation_en_es/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/osanseviero/helsinki_translation_en_es/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
This demo requires installing four libraries: gradio, torch, transformers, and sentencepiece. Apart from that, this is a Gradio demo with the structure you're used to! It is a usual Gradio `Interface` with a prediction function, a specified input, and a specified output. The prediction function executes the `pipeline` function with the given input, retrieves the first (and only) translation result, and returns the `translation_text` field, which is the part you're interested in.
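For reference, a sketch of the complete demo described here, assuming the Helsinki-NLP English-to-Spanish checkpoint that the linked Space uses:

```python
import gradio as gr
from transformers import pipeline

pipe = pipeline("translation", model="Helsinki-NLP/opus-mt-en-es")

def predict(text):
    # the pipeline returns a list with a single result dict
    return pipe(text)[0]["translation_text"]

iface = gr.Interface(
    fn=predict,
    inputs="text",
    outputs="text",
)
iface.launch()
```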
@ -91,7 +91,7 @@ gr.Interface.load("spaces/eugenesiow/remove-bg").launch()
The code snippet above will load the same interface as the corresponding Space demo.
<iframe src="https://hf.space/gradioiframe/eugenesiow/remove-bg/+" frameBorder="0" height="900" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/eugenesiow/remove-bg/+" frameBorder="0" height="900" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
You can change UI elements, such as the title or theme, but also change the expected type. The previous Space expected users to upload images. What if you would like users to remove the background from their webcam feed instead? You can load the Space but change the source of input as follows:
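The diff only shows the first line of that snippet; a sketch of the full call (the `source="webcam"` keyword is the standard gradio 2.x way to do this, though the Space's exact code may differ):

```python
import gradio as gr

# load the existing Space but swap the input for a webcam feed
gr.Interface.load(
    "spaces/eugenesiow/remove-bg",
    inputs=gr.inputs.Image(label="Input Image", source="webcam"),
).launch()
```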
@ -107,7 +107,7 @@ gr.Interface.load(
The code above generates the following demo.
<iframe src="https://hf.space/gradioiframe/osanseviero/remove-bg-webcam/+" frameBorder="0" height="600" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/osanseviero/remove-bg-webcam/+" frameBorder="0" height="600" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
As you can see, the demo looks the same, but it uses a webcam input instead of user-uploaded images.
@ -136,16 +136,41 @@ iface3.launch()
`iface1` and `iface2` are loading existing Spaces. Then, with `Parallel`, you can run the interfaces in parallel. When you click submit, you will get the output from both interfaces. This is what the demo looks like:
<iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
Although both models are generative, you can see that they behave very differently. That's a powerful application of `Parallel`!
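The snippet itself is elided by the diff; here is a sketch under the assumption that `iface1` and `iface2` load two of the text-generation Spaces from the `related_spaces` list above:

```python
import gradio as gr
from gradio.mix import Parallel

# which Spaces to load is an assumption; any two Spaces with matching
# input/output signatures can be combined this way
iface1 = gr.Interface.load("spaces/mrm8488/GPT-J-6B")
iface2 = gr.Interface.load("spaces/akhaliq/T0pp")

# Parallel feeds the same input to both interfaces and shows both outputs
iface3 = Parallel(iface1, iface2)
iface3.launch()
```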
## Creating Spaces with Python

Using the [huggingface_hub client library](https://huggingface.co/docs/huggingface_hub/index), you can create new Spaces or model repositories. You can do this even from within a Gradio Space! You can find an example Space [here](https://huggingface.co/spaces/farukozderim/Model-Comparator-Space-Builder): it creates new Spaces that compare different models or Spaces with the help of Gradio's `load` and `Parallel`. Now you can try creating cool Spaces with all kinds of functionality 😎.
```python
from huggingface_hub import (
    create_repo,
    get_full_repo_name,
    upload_file,
)

create_repo(name=target_space_name, token=hf_token, repo_type="space", space_sdk="gradio")
repo_name = get_full_repo_name(model_id=target_space_name, token=hf_token)
file_url = upload_file(
    path_or_fileobj="file.txt",
    path_in_repo="app.py",
    repo_id=repo_name,
    repo_type="space",
    token=hf_token,
)
```
Here, `create_repo` creates a Gradio Space with the target name under a specific account, using that account's write-access token. `repo_name` retrieves the full repository name for that Space, and `upload_file` then uploads a local file to the repo under the name `app.py`.
<iframe src="https://hf.space/embed/farukozderim/Model-Comparator-Space-Builder/+" frameBorder="0" height="800" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
## Embedding your Space demo on other websites

Throughout this guide, you've seen many embedded Gradio demos. You can also do this on your own website! The first step is to create a Space with the demo you want to showcase. Then you can embed it in your HTML code, as shown in the following self-contained example.
```html
<iframe src="https://hf.space/gradioiframe/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/osanseviero/mix_match_gradio/+" frameBorder="0" height="450" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
```
## Recap

@ -158,4 +183,4 @@ That's it! Let's recap what you can do:
4. Combine multiple Spaces by running them sequentially or in parallel.
5. Embed your Space demo directly on a website.
🤗
🤗
@ -14,13 +14,13 @@ This button opens up interface-specific API docs. This will show you the predict
Below is an (iframed) example: the API Docs of [this space](https://huggingface.co/spaces/aliabd/nubia).
<iframe src="https://hf.space/gradioiframe/aliabd/nubia/api" frameBorder="5" height="725" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
<iframe src="https://hf.space/embed/aliabd/nubia/api" frameBorder="5" height="725" title="Gradio app" class="container p-0 flex-grow space-iframe" allow="accelerometer; ambient-light-sensor; autoplay; battery; camera; document-domain; encrypted-media; fullscreen; geolocation; gyroscope; layout-animations; legacy-image-formats; magnetometer; microphone; midi; oversized-images; payment; picture-in-picture; publickey-credentials-get; sync-xhr; usb; vr ; wake-lock; xr-spatial-tracking" sandbox="allow-forms allow-modals allow-popups allow-popups-to-escape-sandbox allow-same-origin allow-scripts allow-downloads"></iframe>
It shows that there are 7 sections on the page:
* The predict **endpoint**:
  * Where to send the payload (`https://hf.space/gradioiframe/aliabd/nubia/+/api/predict/`). This is likely the most important piece of information as it defines where the request will be sent.
  * Where to send the payload (`https://hf.space/embed/aliabd/nubia/+/api/predict/`). This is likely the most important piece of information as it defines where the request will be sent.
* The **inputs** and their types
* The **outputs** and their types
* The **payload**:
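As a sketch of using that endpoint from Python (the example sentences and the exact response fields are assumptions; consult the Space's own API docs for the real input schema):

```python
import requests

response = requests.post(
    "https://hf.space/embed/aliabd/nubia/+/api/predict/",
    json={"data": ["A candidate sentence.", "A reference sentence."]},  # hypothetical inputs
)
print(response.json()["data"])  # the outputs, in the same order the docs list them
```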
@ -5,7 +5,7 @@ if [ -z "$(ls | grep CONTRIBUTING.md)" ]; then
else
    echo "Uploading to pypi"
    set -e
    git pull origin master
    git pull origin main
    old_version=$(grep -Po "(?<=version=\")[^\"]+(?=\")" setup.py)
    echo "Current version is $old_version. New version?"
    read new_version
3
setup.py
@ -5,7 +5,7 @@ except ImportError:
setup(
    name="gradio",
    version="2.8.13",
    version="2.9.4",
    include_package_data=True,
    description="Python library for easily interacting with trained machine learning models",
    author="Abubakar Abid, Ali Abid, Ali Abdalla, Dawood Khan, Ahsen Khaliq, Pete Allen, Ömer Faruk Özdemir",
@ -31,5 +31,6 @@ setup(
        "pydub",
        "requests",
        "uvicorn",
        "Jinja2"
    ],
)
@ -214,7 +214,7 @@ class TestLoadInterface(unittest.TestCase):
    def test_speech_recognition_model(self):
        interface_info = gr.external.load_interface(
            "models/facebook/wav2vec2-base-960h"
            "models/facebook/wav2vec2-large-960h-lv60-self"
        )
        io = gr.Interface(**interface_info)
        io.api_mode = True
1777
test/test_files/Fox.gltf
Normal file
File diff suppressed because one or more lines are too long
@ -739,6 +739,52 @@ class TestTimeseries(unittest.TestCase):
        )


class TestImage3D(unittest.TestCase):
    def test_as_component(self):
        Image3D = gr.test_data.BASE64_IMAGE3D
        Image3D_input = gr.inputs.Image3D()
        output = Image3D_input.preprocess(Image3D)
        self.assertIsInstance(output, str)

        with tempfile.TemporaryDirectory() as tmpdirname:
            to_save = Image3D_input.save_flagged(
                tmpdirname, "Image3D_input", Image3D, None
            )
            self.assertEqual("Image3D_input/0", to_save)
            to_save = Image3D_input.save_flagged(
                tmpdirname, "Image3D_input", Image3D, None
            )
            self.assertEqual("Image3D_input/1", to_save)
            restored = Image3D_input.restore_flagged(tmpdirname, to_save, None)
            self.assertEqual(restored, "Image3D_input/1")

        self.assertIsInstance(Image3D_input.generate_sample(), dict)
        Image3D_input = gr.inputs.Image3D(label="Upload Your 3D Image Model")
        self.assertEqual(
            Image3D_input.get_template_context(),
            {
                "optional": False,
                "name": "image3d",
                "label": "Upload Your 3D Image Model",
            },
        )

        self.assertIsNone(Image3D_input.preprocess(None))
        Image3D["is_example"] = True
        self.assertIsNotNone(Image3D_input.preprocess(Image3D))
        Image3D_input = gr.inputs.Image3D()
        with self.assertRaises(NotImplementedError):
            Image3D_input.serialize(Image3D, True)

    def test_in_interface(self):
        Image3D = gr.test_data.BASE64_IMAGE3D
        iface = gr.Interface(lambda x: x, "Image3D", "Image3D")
        self.assertEqual(
            iface.process([Image3D])[0][0]["data"],
            Image3D["data"].replace("@file/gltf", ""),
        )


class TestNames(unittest.TestCase):
    # this ensures that `components.get_component_instance()` works correctly when instantiating from components
    def test_no_duplicate_uncased_names(self):
@ -546,5 +546,35 @@ class TestTimeseries(unittest.TestCase):
        )


<<<<<<< HEAD
=======
class TestImage3D(unittest.TestCase):
    def test_as_component(self):
        Image3D = "test/test_files/Fox.gltf"
        Image3D_output = gr.outputs.Image3D()
        self.assertTrue(
            Image3D_output.postprocess(Image3D)["data"].startswith("data:;base64,")
        )
        with tempfile.TemporaryDirectory() as tmpdirname:
            to_save = Image3D_output.save_flagged(
                tmpdirname, "Image3D_output", gr.test_data.BASE64_IMAGE3D, None
            )
            self.assertEqual("Image3D_output/0.gltf", to_save)
            to_save = Image3D_output.save_flagged(
                tmpdirname, "Image3D_output", gr.test_data.BASE64_IMAGE3D, None
            )
            self.assertEqual("Image3D_output/1.gltf", to_save)


class TestNames(unittest.TestCase):
    def test_no_duplicate_uncased_names(
        self,
    ):  # this ensures that get_input_instance() works correctly when instantiating from components
        subclasses = gr.outputs.OutputComponent.__subclasses__()
        unique_subclasses_uncased = set([s.__name__.lower() for s in subclasses])
        self.assertEqual(len(subclasses), len(unique_subclasses_uncased))


>>>>>>> main
if __name__ == "__main__":
    unittest.main()
@ -2,4 +2,4 @@ packages/app/public/**
pnpm-workspace.yaml
packages/app/dist/**
pnpm-lock.yaml
packages/plot/src/Plot.svelte
packages/plot/src/Plot.svelte
215
ui/package-lock.json
generated
@ -13,9 +13,54 @@
"prettier-plugin-svelte": "^2.6.0",
"svelte": "^3.46.3",
"svelte-check": "^2.4.1",
"svelte-i18n": "^3.3.13",
"vitest": "^0.3.2"
}
},
"node_modules/@formatjs/ecma402-abstract": {
"version": "1.11.3",
"resolved": "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-1.11.3.tgz",
"integrity": "sha512-kP/Buv5vVFMAYLHNvvUzr0lwRTU0u2WTy44Tqwku1X3C3lJ5dKqDCYVqA8wL+Y19Bq+MwHgxqd5FZJRCIsLRyQ==",
"dependencies": {
"@formatjs/intl-localematcher": "0.2.24",
"tslib": "^2.1.0"
}
},
"node_modules/@formatjs/fast-memoize": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@formatjs/fast-memoize/-/fast-memoize-1.2.1.tgz",
"integrity": "sha512-Rg0e76nomkz3vF9IPlKeV+Qynok0r7YZjL6syLz4/urSg0IbjPZCB/iYUMNsYA643gh4mgrX3T7KEIFIxJBQeg==",
"dependencies": {
"tslib": "^2.1.0"
}
},
"node_modules/@formatjs/icu-messageformat-parser": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/@formatjs/icu-messageformat-parser/-/icu-messageformat-parser-2.0.18.tgz",
"integrity": "sha512-vquIzsAJJmZ5jWVH8dEgUKcbG4yu3KqtyPet+q35SW5reLOvblkfeCXTRW2TpIwNXzdVqsJBwjbTiRiSU9JxwQ==",
"dependencies": {
"@formatjs/ecma402-abstract": "1.11.3",
"@formatjs/icu-skeleton-parser": "1.3.5",
"tslib": "^2.1.0"
}
},
"node_modules/@formatjs/icu-skeleton-parser": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/@formatjs/icu-skeleton-parser/-/icu-skeleton-parser-1.3.5.tgz",
"integrity": "sha512-Nhyo2/6kG7ZfgeEfo02sxviOuBcvtzH6SYUharj3DLCDJH3A/4OxkKcmx/2PWGX4bc6iSieh+FA94CsKDxnZBQ==",
"dependencies": {
"@formatjs/ecma402-abstract": "1.11.3",
"tslib": "^2.1.0"
}
},
"node_modules/@formatjs/intl-localematcher": {
"version": "0.2.24",
"resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.2.24.tgz",
"integrity": "sha512-K/HRGo6EMnCbhpth/y3u4rW4aXkmQNqRe1L2G+Y5jNr3v0gYhvaucV8WixNju/INAMbPBlbsRBRo/nfjnoOnxQ==",
"dependencies": {
"tslib": "^2.1.0"
}
},
"node_modules/@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@ -215,6 +260,14 @@
"node": ">=0.12"
}
},
"node_modules/deepmerge": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz",
"integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg==",
"engines": {
"node": ">=0.10.0"
}
},
"node_modules/detect-indent": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-6.1.0.tgz",
@ -546,6 +599,11 @@
"node": ">=12"
}
},
"node_modules/estree-walker": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="
},
"node_modules/fast-glob": {
"version": "3.2.11",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz",
@ -641,6 +699,16 @@
"node": ">= 6"
}
},
"node_modules/globalyzer": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/globalyzer/-/globalyzer-0.1.0.tgz",
"integrity": "sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q=="
},
"node_modules/globrex": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/globrex/-/globrex-0.1.2.tgz",
"integrity": "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg=="
},
"node_modules/graceful-fs": {
"version": "4.2.9",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz",
@ -686,6 +754,17 @@
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"node_modules/intl-messageformat": {
"version": "9.11.4",
"resolved": "https://registry.npmjs.org/intl-messageformat/-/intl-messageformat-9.11.4.tgz",
"integrity": "sha512-77TSkNubIy/hsapz6LQpyR6OADcxhWdhSaboPb5flMaALCVkPvAIxr48AlPqaMl4r1anNcvR9rpLWVdwUY1IKg==",
"dependencies": {
"@formatjs/ecma402-abstract": "1.11.3",
"@formatjs/fast-memoize": "1.2.1",
"@formatjs/icu-messageformat-parser": "2.0.18",
"tslib": "^2.1.0"
}
},
"node_modules/is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
@ -1156,6 +1235,27 @@
"svelte": "^3.24.0"
}
},
"node_modules/svelte-i18n": {
"version": "3.3.13",
"resolved": "https://registry.npmjs.org/svelte-i18n/-/svelte-i18n-3.3.13.tgz",
"integrity": "sha512-RQM+ys4+Y9ztH//tX22H1UL2cniLNmIR+N4xmYygV6QpQ6EyQvloZiENRew8XrVzfvJ8HaE8NU6/yurLkl7z3g==",
"dependencies": {
"deepmerge": "^4.2.2",
"estree-walker": "^2.0.1",
"intl-messageformat": "^9.3.15",
"sade": "^1.7.4",
"tiny-glob": "^0.2.6"
},
"bin": {
"svelte-i18n": "dist/cli.js"
},
"engines": {
"node": ">= 11.15.0"
},
"peerDependencies": {
"svelte": "^3.25.1"
}
},
"node_modules/svelte-preprocess": {
"version": "4.10.3",
"resolved": "https://registry.npmjs.org/svelte-preprocess/-/svelte-preprocess-4.10.3.tgz",
@ -1221,6 +1321,15 @@
}
}
},
"node_modules/tiny-glob": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/tiny-glob/-/tiny-glob-0.2.9.tgz",
"integrity": "sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==",
"dependencies": {
"globalyzer": "0.1.0",
"globrex": "^0.1.2"
}
},
"node_modules/tinypool": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.1.2.tgz",
@ -1248,6 +1357,11 @@
"node": ">=8.0"
}
},
"node_modules/tslib": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
"integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw=="
},
"node_modules/type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
@ -1354,6 +1468,50 @@
}
},
"dependencies": {
"@formatjs/ecma402-abstract": {
"version": "1.11.3",
"resolved": "https://registry.npmjs.org/@formatjs/ecma402-abstract/-/ecma402-abstract-1.11.3.tgz",
"integrity": "sha512-kP/Buv5vVFMAYLHNvvUzr0lwRTU0u2WTy44Tqwku1X3C3lJ5dKqDCYVqA8wL+Y19Bq+MwHgxqd5FZJRCIsLRyQ==",
"requires": {
"@formatjs/intl-localematcher": "0.2.24",
"tslib": "^2.1.0"
}
},
"@formatjs/fast-memoize": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/@formatjs/fast-memoize/-/fast-memoize-1.2.1.tgz",
"integrity": "sha512-Rg0e76nomkz3vF9IPlKeV+Qynok0r7YZjL6syLz4/urSg0IbjPZCB/iYUMNsYA643gh4mgrX3T7KEIFIxJBQeg==",
"requires": {
"tslib": "^2.1.0"
}
},
"@formatjs/icu-messageformat-parser": {
"version": "2.0.18",
"resolved": "https://registry.npmjs.org/@formatjs/icu-messageformat-parser/-/icu-messageformat-parser-2.0.18.tgz",
"integrity": "sha512-vquIzsAJJmZ5jWVH8dEgUKcbG4yu3KqtyPet+q35SW5reLOvblkfeCXTRW2TpIwNXzdVqsJBwjbTiRiSU9JxwQ==",
"requires": {
"@formatjs/ecma402-abstract": "1.11.3",
"@formatjs/icu-skeleton-parser": "1.3.5",
"tslib": "^2.1.0"
}
},
"@formatjs/icu-skeleton-parser": {
"version": "1.3.5",
"resolved": "https://registry.npmjs.org/@formatjs/icu-skeleton-parser/-/icu-skeleton-parser-1.3.5.tgz",
"integrity": "sha512-Nhyo2/6kG7ZfgeEfo02sxviOuBcvtzH6SYUharj3DLCDJH3A/4OxkKcmx/2PWGX4bc6iSieh+FA94CsKDxnZBQ==",
"requires": {
"@formatjs/ecma402-abstract": "1.11.3",
"tslib": "^2.1.0"
}
},
"@formatjs/intl-localematcher": {
"version": "0.2.24",
"resolved": "https://registry.npmjs.org/@formatjs/intl-localematcher/-/intl-localematcher-0.2.24.tgz",
"integrity": "sha512-K/HRGo6EMnCbhpth/y3u4rW4aXkmQNqRe1L2G+Y5jNr3v0gYhvaucV8WixNju/INAMbPBlbsRBRo/nfjnoOnxQ==",
"requires": {
"tslib": "^2.1.0"
}
},
"@nodelib/fs.scandir": {
"version": "2.1.5",
"resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz",
@ -1506,6 +1664,11 @@
"type-detect": "^4.0.0"
}
},
"deepmerge": {
"version": "4.2.2",
"resolved": "https://registry.npmjs.org/deepmerge/-/deepmerge-4.2.2.tgz",
"integrity": "sha512-FJ3UgI4gIl+PHZm53knsuSFpE+nESMr7M4v9QcgB7S63Kj/6WqMiFQJpBBYz1Pt+66bZpP3Q7Lye0Oo9MPKEdg=="
},
"detect-indent": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/detect-indent/-/detect-indent-6.1.0.tgz",
@ -1656,6 +1819,11 @@
"integrity": "sha512-8Sbo0zpzgwWrwjQYLmHF78f7E2xg5Ve63bjB2ng3V2aManilnnTGaliq2snYg+NOX60+hEvJHRdVnuIAHW0lVw==",
"optional": true
},
"estree-walker": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz",
"integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w=="
},
"fast-glob": {
"version": "3.2.11",
"resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.2.11.tgz",
@ -1726,6 +1894,16 @@
"is-glob": "^4.0.1"
}
},
"globalyzer": {
"version": "0.1.0",
"resolved": "https://registry.npmjs.org/globalyzer/-/globalyzer-0.1.0.tgz",
"integrity": "sha512-40oNTM9UfG6aBmuKxk/giHn5nQ8RVz/SS4Ir6zgzOv9/qC3kKZ9v4etGTcJbEl/NyVQH7FGU7d+X1egr57Md2Q=="
},
"globrex": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/globrex/-/globrex-0.1.2.tgz",
"integrity": "sha512-uHJgbwAMwNFf5mLst7IWLNg14x1CkeqglJb/K3doi4dw6q2IvAAmM/Y81kevy83wP+Sst+nutFTYOGg3d1lsxg=="
},
"graceful-fs": {
"version": "4.2.9",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.9.tgz",
@ -1762,6 +1940,17 @@
"resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz",
"integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ=="
},
"intl-messageformat": {
"version": "9.11.4",
"resolved": "https://registry.npmjs.org/intl-messageformat/-/intl-messageformat-9.11.4.tgz",
"integrity": "sha512-77TSkNubIy/hsapz6LQpyR6OADcxhWdhSaboPb5flMaALCVkPvAIxr48AlPqaMl4r1anNcvR9rpLWVdwUY1IKg==",
"requires": {
"@formatjs/ecma402-abstract": "1.11.3",
"@formatjs/fast-memoize": "1.2.1",
"@formatjs/icu-messageformat-parser": "2.0.18",
"tslib": "^2.1.0"
}
},
"is-binary-path": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz",
@ -2070,6 +2259,18 @@
"typescript": "*"
}
},
"svelte-i18n": {
"version": "3.3.13",
"resolved": "https://registry.npmjs.org/svelte-i18n/-/svelte-i18n-3.3.13.tgz",
"integrity": "sha512-RQM+ys4+Y9ztH//tX22H1UL2cniLNmIR+N4xmYygV6QpQ6EyQvloZiENRew8XrVzfvJ8HaE8NU6/yurLkl7z3g==",
"requires": {
"deepmerge": "^4.2.2",
"estree-walker": "^2.0.1",
"intl-messageformat": "^9.3.15",
"sade": "^1.7.4",
"tiny-glob": "^0.2.6"
}
},
"svelte-preprocess": {
"version": "4.10.3",
"resolved": "https://registry.npmjs.org/svelte-preprocess/-/svelte-preprocess-4.10.3.tgz",
@ -2083,6 +2284,15 @@
"strip-indent": "^3.0.0"
}
},
"tiny-glob": {
"version": "0.2.9",
"resolved": "https://registry.npmjs.org/tiny-glob/-/tiny-glob-0.2.9.tgz",
"integrity": "sha512-g/55ssRPUjShh+xkfx9UPDXqhckHEsHr4Vd9zX55oSdGZc/MD0m3sferOkwWtp98bv+kcVfEHtRJgBVJzelrzg==",
"requires": {
"globalyzer": "0.1.0",
"globrex": "^0.1.2"
}
},
"tinypool": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/tinypool/-/tinypool-0.1.2.tgz",
@ -2101,6 +2311,11 @@
"is-number": "^7.0.0"
}
},
"tslib": {
"version": "2.3.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.3.1.tgz",
"integrity": "sha512-77EbyPPpMz+FRFRuAFlWMtmgUWGe9UOG2Z25NqCwiIjRhOf5iKGuzSe5P2w1laq+FkRy4p+PCuVkJSGkzTEKVw=="
},
"type-detect": {
"version": "4.0.8",
"resolved": "https://registry.npmjs.org/type-detect/-/type-detect-4.0.8.tgz",
@ -45,6 +45,12 @@
"tailwindcss": "^3.0.23",
"tinyspy": "^0.3.0",
"vite": "^2.9.1",
"vitest": "^0.7.4"
"vitest": "^0.3.2",
"plotly.js-dist-min": "^2.10.1",
"babylonjs": "^4.2.1",
"babylonjs-loaders": "^4.2.1"
},
"devDependencies": {
"@types/three": "^0.138.0"
}
}
7474
ui/packages/app/public/static/img/Bunny.obj
Normal file
File diff suppressed because it is too large
BIN
ui/packages/app/public/static/img/Duck.glb
Normal file
Binary file not shown.
@ -110,7 +110,7 @@ window.launchGradio = (config: Config, element_query: string) => {
};

window.launchGradioFromSpaces = async (space: string, target: string) => {
const space_url = `https://huggingface.co/gradioiframe/${space}/+/`;
const space_url = `https://hf.space/embed/${space}/+/`;
let config = await fetch(space_url + "config");
let _config: Config = await config.json();
_config.root = space_url;
@ -5,9 +5,10 @@
};

export let style: string = "";
export let theme: string = "default";
</script>

<div class="output-label">
<div class="output-label" {theme}>
<div
class="output-class font-bold text-2xl py-6 px-4 flex-grow flex items-center justify-center"
class:no-confidence={!("confidences" in value)}
35
ui/pnpm-lock.yaml
generated
@ -17,6 +17,10 @@ importers:
polka: ^1.0.0-next.22
postcss: ^8.4.5
postcss-nested: ^5.0.6
'@types/three': ^0.138.0
babylonjs: ^4.2.1
babylonjs-loaders: ^4.2.1
plotly.js-dist-min: ^2.10.1
prettier: ^2.5.1
prettier-plugin-svelte: ^2.6.0
sirv: ^2.0.2
@ -43,6 +47,9 @@ importers:
polka: 1.0.0-next.22
postcss: 8.4.6
postcss-nested: 5.0.6_postcss@8.4.6
babylonjs: 4.2.1
babylonjs-loaders: 4.2.1
plotly.js-dist-min: 2.10.1
prettier: 2.5.1
prettier-plugin-svelte: 2.6.0_prettier@2.5.1+svelte@3.46.3
sirv: 2.0.2
@ -55,6 +62,9 @@ importers:
tinyspy: 0.3.0
vite: 2.9.1
vitest: 0.7.4_happy-dom@2.49.0
vitest: 0.3.5
devDependencies:
'@types/three': 0.138.0

packages/app:
specifiers:
@ -1243,6 +1253,10 @@ packages:
dev: false
optional: true

/@types/three/0.138.0:
resolution: {integrity: sha512-D8AoV7h2kbCfrv/DcebHOFh1WDwyus3HdooBkAwcBikXArdqnsQ38PQ85JCunnvun160oA9jz53GszF3zch3tg==}
dev: true

/abab/2.0.5:
resolution: {integrity: sha512-9IK9EadsbHo6jLWIpxpR6pL0sazTXV6+SQv25ZB+F7Bj9mJNaOc4nCRabwd5M/JwmUa8idz6Eci6eKfJryPs6Q==}
dev: false
@ -1440,6 +1454,7 @@
dev: false
optional: true
<<<<<<< HEAD
/babel-plugin-dynamic-import-node/2.3.3:
resolution: {integrity: sha512-jZVI+s9Zg3IqA/kdi0i6UDCybUI3aSBLnglhYbSSjKlV7yF1F/5LWv8MakQmvYpnbJDS6fcBL2KzHSxNCMtWSQ==}
dependencies:
@ -1459,6 +1474,21 @@

/babylonjs/4.2.2:
resolution: {integrity: sha512-p7mTi6+nLuWJTLbwxEJxLOh/QMHMV2KA0bviEoQSK5VtsAq1F0JghoOZYRs4aEqAZF/deFPWvMQk1vbXJ+4eEA==}
=======
/babylonjs-gltf2interface/4.2.1:
resolution: {integrity: sha512-ZBfKgIoztO1x1nyf9aPQJ+WXmB6Kw0VlyxvcKchIixbICqeeExiN8nmjvypwXC4hl+5ZDMnUKQNrIhh7uzulnA==}
dev: false

/babylonjs-loaders/4.2.1:
resolution: {integrity: sha512-WLpbadXDyxbBQogU0SOrpwgAWN/DJ1xn5kCRD31NVoCbBIpvkdDIvzpRvze2esxrlv/KM8wbDu62ShJd6rQnVQ==}
dependencies:
babylonjs: 4.2.1
babylonjs-gltf2interface: 4.2.1
dev: false

/babylonjs/4.2.1:
resolution: {integrity: sha512-FQdJ2VTENUpUJQ30ddihwTjV6K94kglET0P7jV8OQzjA4eez3sotmG22Fn9+8yb069SA26KnrAGOI2sKMQ7BCw==}
>>>>>>> main
dev: false

/balanced-match/1.0.2:
@ -3361,6 +3391,7 @@
resolution: {integrity: sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==}
engines: {node: '>=8.6'}

<<<<<<< HEAD
/pidtree/0.3.1:
resolution: {integrity: sha512-qQbW94hLHEqCg7nhby4yRC7G2+jYHY4Rguc2bjw7Uug4GIJuu1tvf2uHaZv5Q8zdt+WKJ6qK1FOI6amaWUo5FA==}
engines: {node: '>=0.10'}
@ -3415,6 +3446,10 @@

/plotly.js-dist-min/2.11.1:
resolution: {integrity: sha512-F9WWNht0D3yBLZGHbLoJNfvplXvy+GUPSsA/lCbMuYd/UwzSu6Vmyprxlps9Einw1LDS1hYBrJeioK0lE3ieXA==}
=======
/plotly.js-dist-min/2.10.1:
resolution: {integrity: sha512-H0ls1C2uu2U+qWw76djo4/zOGtUKfMILwFhu7tCOaG/wH5ypujrYGCH03N9SQVf1SXcctTfW57USf8LmagSiPQ==}
>>>>>>> main
dev: false

/pn/1.1.0: