Mirror of https://github.com/gradio-app/gradio.git (synced 2025-03-01 11:45:36 +08:00)
Fix loading private Spaces (#4430)
* replace api key with hf_token
* removed extra slash
* changelog
* update notebooks
* lint
* changelog
* changelog
* revert demo
This commit is contained in:
parent 866b57b93c
commit d82dfb886d
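For context before the diff hunks: after this change, `gr.load` takes the access token under the `hf_token` keyword. A minimal sketch of the new call shape (the Space name and env var below are placeholders, not from this commit):

```python
import os

import gradio as gr

# Hypothetical private Space; `api_key=` is the deprecated spelling
# that this commit replaces with `hf_token=`.
demo = gr.load(
    "spaces/your-username/your-private-space",
    hf_token=os.getenv("HF_TOKEN"),
)
demo.launch()
```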
@@ -9,6 +9,8 @@
- Remove target="_blank" override on anchor tags with internal targets by [@hannahblair](https://github.com/hannahblair) in [PR 4405](https://github.com/gradio-app/gradio/pull/4405)
- Fixed bug where `gr.File(file_count='multiple')` could not be cached as output by [@freddyaboulton](https://github.com/freddyaboulton) in [PR 4421](https://github.com/gradio-app/gradio/pull/4421)
- Restricts the domains that can be proxied via `/proxy` route by [@abidlabs](https://github.com/abidlabs) in [PR 4406](https://github.com/gradio-app/gradio/pull/4406).
- Fixes bug where `/proxy` route was being incorrectly constructed by the frontend by [@abidlabs](https://github.com/abidlabs) in [PR 4430](https://github.com/gradio-app/gradio/pull/4430).
- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)
- Fix video rendering in Safari by [@aliabid94](https://github.com/aliabid94) in [PR 4433](https://github.com/gradio-app/gradio/pull/4433).

## Other Changes:
@@ -27,7 +29,7 @@ No changes to highlight.

## Bug Fixes:

- Fix bug for get_continuous_fn by [@dkjshk](https://github.com/dkjshk) in [PR 4434](https://github.com/gradio-app/gradio/pull/4434)
- Allow `every` to work with generators by [@dkjshk](https://github.com/dkjshk) in [PR 4434](https://github.com/gradio-app/gradio/pull/4434)
- Fix z-index of status component by [@hannahblair](https://github.com/hannahblair) in [PR 4429](https://github.com/gradio-app/gradio/pull/4429)
- Allow gradio to work offline, by [@aliabid94](https://github.com/aliabid94) in [PR 4398](https://github.com/gradio-app/gradio/pull/4398).
- Fixed `validate_url` to check for 403 errors and use a GET request in place of a HEAD by [@alvindaiyan](https://github.com/alvindaiyan) in [PR 4388](https://github.com/gradio-app/gradio/pull/4388).
@@ -745,7 +745,7 @@ function normalise_file(
		if (!root_url) {
			file.data = root + "/file=" + file.name;
		} else {
-			file.data = "/proxy=" + root_url + "/file=" + file.name;
+			file.data = "/proxy=" + root_url + "file=" + file.name;
		}
	}
	return file;
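This is the "removed extra slash" from the commit message: the `root_url` recorded for a Space appears to already end in a trailing slash, so appending `"/file="` produced a malformed `proxy=...//file=` URL. A minimal Python sketch of the corrected join, under that trailing-slash assumption (function name is illustrative):

```python
# Sketch of the corrected proxy URL join, assuming root_url carries a
# trailing slash (e.g. "https://user-space.hf.space/").
def proxied_file_url(root_url: str, filename: str) -> str:
    return "/proxy=" + root_url + "file=" + filename

# Prints "/proxy=https://user-space.hf.space/file=cat.png" -- a single
# slash before "file=", where the old code produced "//file=".
print(proxied_file_url("https://user-space.hf.space/", "cat.png"))
```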
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: autocomplete\n", "### This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# load a model from https://hf.co/models as an interface, then use it as an api \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "api = gr.load(\"huggingface/EleutherAI/gpt-j-6B\", api_key=auth_token)\n", "\n", "def complete_with_gpt(text):\n", " return text[:-50] + api(text[-50:])\n", "\n", "with gr.Blocks() as demo:\n", " textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n", " btn = gr.Button(\"Autocomplete\")\n", " \n", " # define what will run when the button is clicked, here the textbox is used as both an input and an output\n", " btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: autocomplete\n", "### This text generation demo works like autocomplete. There's only one textbox and it's used for both the input and the output. The demo loads the model as an interface, and uses that interface as an API. It then uses blocks to create the UI. All of this is done in less than 10 lines of code.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# load a model from https://hf.co/models as an interface, then use it as an api \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "api = gr.load(\"huggingface/EleutherAI/gpt-j-6B\", hf_token=auth_token)\n", "\n", "def complete_with_gpt(text):\n", " return text[:-50] + api(text[-50:])\n", "\n", "with gr.Blocks() as demo:\n", " textbox = gr.Textbox(placeholder=\"Type here...\", lines=4)\n", " btn = gr.Button(\"Autocomplete\")\n", " \n", " # define what will run when the button is clicked, here the textbox is used as both an input and an output\n", " btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)\n", "\n", "demo.launch()"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -6,7 +6,7 @@ auth_token = os.getenv("auth_token")

# load a model from https://hf.co/models as an interface, then use it as an api
# you can remove the api_key parameter if you don't care about rate limiting.
-api = gr.load("huggingface/EleutherAI/gpt-j-6B", api_key=auth_token)
+api = gr.load("huggingface/EleutherAI/gpt-j-6B", hf_token=auth_token)

def complete_with_gpt(text):
    return text[:-50] + api(text[-50:])
@@ -1 +1 @@
-{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: automatic-speech-recognition\n", "### Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# automatically load the interface from a HF model \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=\"Speech-to-text\",\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", " api_key=auth_token\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
+{"cells": [{"cell_type": "markdown", "id": 302934307671667531413257853548643485645, "metadata": {}, "source": ["# Gradio Demo: automatic-speech-recognition\n", "### Automatic speech recognition English. Record from your microphone and the app will transcribe the audio.\n", " "]}, {"cell_type": "code", "execution_count": null, "id": 272996653310673477252411125948039410165, "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": 288918539441861185822528903084949547379, "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "import os\n", "\n", "# save your HF API token from https:/hf.co/settings/tokens as an env variable to avoid rate limiting\n", "auth_token = os.getenv(\"auth_token\")\n", "\n", "# automatically load the interface from a HF model \n", "# you can remove the api_key parameter if you don't care about rate limiting. \n", "demo = gr.load(\n", " \"huggingface/facebook/wav2vec2-base-960h\",\n", " title=\"Speech-to-text\",\n", " inputs=\"mic\",\n", " description=\"Let me try to guess what you're saying!\",\n", " hf_token=auth_token\n", ")\n", "\n", "demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
@@ -11,7 +11,7 @@ demo = gr.load(
    title="Speech-to-text",
    inputs="mic",
    description="Let me try to guess what you're saying!",
-    api_key=auth_token
+    hf_token=auth_token
)

demo.launch()
@@ -66,14 +66,14 @@ def load(
    )
    hf_token = api_key
    return load_blocks_from_repo(
-        name=name, src=src, api_key=hf_token, alias=alias, **kwargs
+        name=name, src=src, hf_token=hf_token, alias=alias, **kwargs
    )


def load_blocks_from_repo(
    name: str,
    src: str | None = None,
-    api_key: str | None = None,
+    hf_token: str | None = None,
    alias: str | None = None,
    **kwargs,
) -> Blocks:
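The `hf_token = api_key` assignment keeps the public `load` entry point backward compatible while the internal call switches to the new keyword. A hedged sketch of that shim pattern (signatures simplified, and the stub loader below is a stand-in, not gradio's real implementation):

```python
import warnings

def load_blocks_from_repo(name, src=None, hf_token=None, alias=None, **kwargs):
    # Stand-in for the real loader; returns its inputs for demonstration.
    return {"name": name, "src": src, "hf_token": hf_token, "alias": alias}

def load(name, src=None, api_key=None, alias=None, **kwargs):
    if api_key is not None:
        # Assumed behavior: the old keyword still works but warns.
        warnings.warn("`api_key` is deprecated; use `hf_token` instead.")
    hf_token = api_key
    return load_blocks_from_repo(name=name, src=src, hf_token=hf_token, alias=alias, **kwargs)

print(load("spaces/some/space", api_key="hf_xxx"))  # old callers keep working
```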
@@ -97,14 +97,14 @@ def load_blocks_from_repo(
        src.lower() in factory_methods
    ), f"parameter: src must be one of {factory_methods.keys()}"

-    if api_key is not None:
-        if Context.hf_token is not None and Context.hf_token != api_key:
+    if hf_token is not None:
+        if Context.hf_token is not None and Context.hf_token != hf_token:
            warnings.warn(
                """You are loading a model/Space with a different access token than the one you used to load a previous model/Space. This is not recommended, as it may cause unexpected behavior."""
            )
-        Context.hf_token = api_key
+        Context.hf_token = hf_token

-    blocks: gradio.Blocks = factory_methods[src](name, api_key, alias, **kwargs)
+    blocks: gradio.Blocks = factory_methods[src](name, hf_token, alias, **kwargs)
    return blocks
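`Context.hf_token` acts as a process-wide record of the last token used, which is what makes the warning above possible. A simplified sketch of the pattern (names mirror the hunk, but this is not the full gradio `Context`; requires Python 3.10+ for the `str | None` annotation):

```python
import warnings

class Context:
    # Process-wide record of the token used by the most recent load.
    hf_token: str | None = None

def remember_token(hf_token: str | None) -> None:
    if hf_token is not None:
        if Context.hf_token is not None and Context.hf_token != hf_token:
            warnings.warn("Loading with a different access token than a previous load.")
        Context.hf_token = hf_token

remember_token("hf_first")
remember_token("hf_second")  # warns: token differs from the cached one
```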
@@ -134,12 +134,12 @@ def chatbot_postprocess(response):
    return chatbot_value, response_json


-def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs):
+def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
    model_url = f"https://huggingface.co/{model_name}"
    api_url = f"https://api-inference.huggingface.co/models/{model_name}"
    print(f"Fetching model from: {model_url}")

-    headers = {"Authorization": f"Bearer {api_key}"} if api_key is not None else {}
+    headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}

    # Checking if model exists, and if so, it gets the pipeline
    response = requests.request("GET", api_url, headers=headers)
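The header construction is the usual Hugging Face Inference API bearer-token pattern: attach `Authorization` only when a token exists, so anonymous access to public models keeps working. A small standalone sketch (model name and env var are illustrative):

```python
import os

import requests

hf_token = os.getenv("HF_TOKEN")  # None means anonymous access to public models
headers = {"Authorization": f"Bearer {hf_token}"} if hf_token is not None else {}

# Probe the model endpoint, mirroring the existence check in the hunk above.
response = requests.get("https://api-inference.huggingface.co/models/gpt2", headers=headers)
print(response.status_code)
```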
@@ -439,15 +439,15 @@ def from_model(model_name: str, api_key: str | None, alias: str | None, **kwargs


def from_spaces(
-    space_name: str, api_key: str | None, alias: str | None, **kwargs
+    space_name: str, hf_token: str | None, alias: str | None, **kwargs
) -> Blocks:
    space_url = f"https://huggingface.co/spaces/{space_name}"

    print(f"Fetching Space from: {space_url}")

    headers = {}
-    if api_key is not None:
-        headers["Authorization"] = f"Bearer {api_key}"
+    if hf_token is not None:
+        headers["Authorization"] = f"Bearer {hf_token}"

    iframe_url = (
        requests.get(
@@ -473,7 +473,7 @@ def from_spaces(
        raise ValueError(f"Could not load the Space: {space_name}") from ae
    if "allow_flagging" in config:  # Create an Interface for Gradio 2.x Spaces
        return from_spaces_interface(
-            space_name, config, alias, api_key, iframe_url, **kwargs
+            space_name, config, alias, hf_token, iframe_url, **kwargs
        )
    else:  # Create a Blocks for Gradio 3.x Spaces
        if kwargs:
@@ -483,11 +483,11 @@ def from_spaces(
                "Blocks or Interface locally. You may find this Guide helpful: "
                "https://gradio.app/using_blocks_like_functions/"
            )
-        return from_spaces_blocks(space=space_name, api_key=api_key)
+        return from_spaces_blocks(space=space_name, hf_token=hf_token)


-def from_spaces_blocks(space: str, api_key: str | None) -> Blocks:
-    client = Client(space, hf_token=api_key)
+def from_spaces_blocks(space: str, hf_token: str | None) -> Blocks:
+    client = Client(space, hf_token=hf_token)
    predict_fns = [endpoint._predict_resolve for endpoint in client.endpoints]
    return gradio.Blocks.from_config(client.config, predict_fns, client.src)
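`from_spaces_blocks` now forwards the token to `gradio_client.Client`, which already named the parameter `hf_token`; before this commit the mismatched `api_key` name had to be bridged at this call site. A hedged usage sketch (Space name and token are placeholders):

```python
from gradio_client import Client

# Connect to a (hypothetical) private Space the same way from_spaces_blocks does.
client = Client("your-username/your-private-space", hf_token="hf_xxx")
print(client.view_api())  # list the callable endpoints exposed by the Space
```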
@@ -496,15 +496,15 @@ def from_spaces_interface(
    model_name: str,
    config: dict,
    alias: str | None,
-    api_key: str | None,
+    hf_token: str | None,
    iframe_url: str,
    **kwargs,
) -> Interface:
    config = streamline_spaces_interface(config)
    api_url = f"{iframe_url}/api/predict/"
    headers = {"Content-Type": "application/json"}
-    if api_key is not None:
-        headers["Authorization"] = f"Bearer {api_key}"
+    if hf_token is not None:
+        headers["Authorization"] = f"Bearer {hf_token}"

    # The function should call the API with preprocessed data
    def fn(*data):
@@ -23,7 +23,7 @@
	}>();

	let samples_dir: string = root_url
-		? "proxy=" + root_url + "/file="
+		? "proxy=" + root_url + "file="
		: root + "/file=";
	let page = 0;
	$: gallery = components.length < 2;
@@ -45,7 +45,7 @@ export function normalise_file(
		if (root_url == null) {
			file.data = root + "/file=" + file.name;
		} else {
-			file.data = "/proxy=" + root_url + "/file=" + file.name;
+			file.data = "/proxy=" + root_url + "file=" + file.name;
		}
	}
	return file;
@@ -68,7 +68,7 @@ class TestLoadInterface:
    def test_summarization(self):
        model_type = "summarization"
        interface = gr.load(
-            "models/facebook/bart-large-cnn", api_key=None, alias=model_type
+            "models/facebook/bart-large-cnn", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
@@ -77,7 +77,7 @@ class TestLoadInterface:
    def test_translation(self):
        model_type = "translation"
        interface = gr.load(
-            "models/facebook/bart-large-cnn", api_key=None, alias=model_type
+            "models/facebook/bart-large-cnn", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
@@ -86,7 +86,7 @@ class TestLoadInterface:
    def test_text2text_generation(self):
        model_type = "text2text-generation"
        interface = gr.load(
-            "models/sshleifer/tiny-mbart", api_key=None, alias=model_type
+            "models/sshleifer/tiny-mbart", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
@@ -96,7 +96,7 @@ class TestLoadInterface:
        model_type = "text-classification"
        interface = gr.load(
            "models/distilbert-base-uncased-finetuned-sst-2-english",
-            api_key=None,
+            hf_token=None,
            alias=model_type,
        )
        assert interface.__name__ == model_type
@@ -105,7 +105,7 @@ class TestLoadInterface:

    def test_fill_mask(self):
        model_type = "fill-mask"
-        interface = gr.load("models/bert-base-uncased", api_key=None, alias=model_type)
+        interface = gr.load("models/bert-base-uncased", hf_token=None, alias=model_type)
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
        assert isinstance(interface.output_components[0], gr.Label)
@@ -113,7 +113,7 @@ class TestLoadInterface:
    def test_zero_shot_classification(self):
        model_type = "zero-shot-classification"
        interface = gr.load(
-            "models/facebook/bart-large-mnli", api_key=None, alias=model_type
+            "models/facebook/bart-large-mnli", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
@@ -124,7 +124,7 @@ class TestLoadInterface:
    def test_automatic_speech_recognition(self):
        model_type = "automatic-speech-recognition"
        interface = gr.load(
-            "models/facebook/wav2vec2-base-960h", api_key=None, alias=model_type
+            "models/facebook/wav2vec2-base-960h", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Audio)
@@ -133,7 +133,7 @@ class TestLoadInterface:
    def test_image_classification(self):
        model_type = "image-classification"
        interface = gr.load(
-            "models/google/vit-base-patch16-224", api_key=None, alias=model_type
+            "models/google/vit-base-patch16-224", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Image)
@@ -143,7 +143,7 @@ class TestLoadInterface:
        model_type = "feature-extraction"
        interface = gr.load(
            "models/sentence-transformers/distilbert-base-nli-mean-tokens",
-            api_key=None,
+            hf_token=None,
            alias=model_type,
        )
        assert interface.__name__ == model_type
@@ -154,7 +154,7 @@ class TestLoadInterface:
        model_type = "text-to-speech"
        interface = gr.load(
            "models/julien-c/ljspeech_tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space_train",
-            api_key=None,
+            hf_token=None,
            alias=model_type,
        )
        assert interface.__name__ == model_type
@@ -165,7 +165,7 @@ class TestLoadInterface:
        model_type = "text-to-speech"
        interface = gr.load(
            "models/julien-c/ljspeech_tts_train_tacotron2_raw_phn_tacotron_g2p_en_no_space_train",
-            api_key=None,
+            hf_token=None,
            alias=model_type,
        )
        assert interface.__name__ == model_type
@@ -175,7 +175,7 @@ class TestLoadInterface:
    def test_text_to_image(self):
        model_type = "text-to-image"
        interface = gr.load(
-            "models/osanseviero/BigGAN-deep-128", api_key=None, alias=model_type
+            "models/osanseviero/BigGAN-deep-128", hf_token=None, alias=model_type
        )
        assert interface.__name__ == model_type
        assert isinstance(interface.input_components[0], gr.Textbox)
@@ -288,8 +288,10 @@ class TestLoadInterface:
            pass

    def test_private_space(self):
-        api_key = "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"  # Intentionally revealing this key for testing purposes
-        io = gr.load("spaces/gradio-tests/not-actually-private-space", api_key=api_key)
+        hf_token = "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"  # Intentionally revealing this key for testing purposes
+        io = gr.load(
+            "spaces/gradio-tests/not-actually-private-space", hf_token=hf_token
+        )
        try:
            output = io("abc")
            assert output == "abc"
@@ -298,9 +300,9 @@ class TestLoadInterface:
            pass

    def test_private_space_audio(self):
-        api_key = "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"  # Intentionally revealing this key for testing purposes
+        hf_token = "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"  # Intentionally revealing this key for testing purposes
        io = gr.load(
-            "spaces/gradio-tests/not-actually-private-space-audio", api_key=api_key
+            "spaces/gradio-tests/not-actually-private-space-audio", hf_token=hf_token
        )
        try:
            output = io(media_data.BASE64_AUDIO["name"])
@@ -309,18 +311,18 @@ class TestLoadInterface:
            pass

    def test_multiple_spaces_one_private(self):
-        api_key = "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"  # Intentionally revealing this key for testing purposes
+        hf_token = "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"  # Intentionally revealing this key for testing purposes
        with gr.Blocks():
-            gr.load("spaces/gradio-tests/not-actually-private-space", api_key=api_key)
+            gr.load("spaces/gradio-tests/not-actually-private-space", hf_token=hf_token)
            gr.load(
                "spaces/gradio/test-loading-examples",
            )
-        assert Context.hf_token == api_key
+        assert Context.hf_token == hf_token

    def test_loading_files_via_proxy_works(self):
-        api_key = "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"  # Intentionally revealing this key for testing purposes
+        hf_token = "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"  # Intentionally revealing this key for testing purposes
        io = gr.load(
-            "spaces/gradio-tests/test-loading-examples-private", hf_token=api_key
+            "spaces/gradio-tests/test-loading-examples-private", hf_token=hf_token
        )
        assert io.theme.name == "default"
        app, _, _ = io.launch(prevent_thread_lock=True)