Mirror of https://github.com/gradio-app/gradio.git (synced 2025-03-01 11:45:36 +08:00)
Remember token locally with gr.load() (#9966)
* localstate * add changeset * changes * changes * changes * add changeset * changes * add changeset * format * notebook * some changes * add changeset * format * fix * changes * fix js lint and ts * add changeset * fix pytest * component demo * rename * rename * notebooks * changes * add changeset * revert * changes * revert * revert * revert * changes * changes * format * fix * notebook * docstring * guide * types * demo * push * cleanup * demo * add changeset * notebook * add changeset * clean * add changeset --------- Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
parent cfb62bfdb5
commit da6f191554
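In short: when gr.load() is called with accept_token=True, a token prompt is rendered before the model UI, and with this change that prompt can also remember the submitted Hugging Face token in the user's browser. A minimal sketch of the feature from the user's side (the model ID is the one used in the demo added below; the remember-me behavior is the one this commit introduces):

import gradio as gr

# Sketch: accept_token=True makes gr.load() prompt for a Hugging Face token
# before loading the model. After this change, the prompt also offers a
# "Remember me on this device" checkbox that stores the token in the browser
# via gr.BrowserState, so returning visitors are not asked again.
demo = gr.load("meta-llama/Meta-Llama-3-8B-Instruct", src="models", accept_token=True)

if __name__ == "__main__":
    demo.launch()

The persistence itself is implemented with gr.BrowserState, as shown in the updated docstring example further down.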
.changeset/vast-hotels-chew.md (new file)
@@ -0,0 +1,5 @@
---
"gradio": patch
---

feat:Remember token locally with `gr.load()`
demo/load_model_with_token/run.ipynb (new file)
@@ -0,0 +1 @@
{"cells": [{"cell_type": "markdown", "id": "302934307671667531413257853548643485645", "metadata": {}, "source": ["# Gradio Demo: load_model_with_token"]}, {"cell_type": "code", "execution_count": null, "id": "272996653310673477252411125948039410165", "metadata": {}, "outputs": [], "source": ["!pip install -q gradio "]}, {"cell_type": "code", "execution_count": null, "id": "288918539441861185822528903084949547379", "metadata": {}, "outputs": [], "source": ["import gradio as gr\n", "\n", "# This demo requires a Hugging Face PRO token.\n", "demo = gr.load(\"meta-llama/Meta-Llama-3-8B-Instruct\", src=\"models\", accept_token=True)\n", "\n", "if __name__ == \"__main__\":\n", "    demo.launch()\n"]}], "metadata": {}, "nbformat": 4, "nbformat_minor": 5}
demo/load_model_with_token/run.py (new file)
@@ -0,0 +1,7 @@
import gradio as gr

# This demo requires a Hugging Face PRO token.
demo = gr.load("meta-llama/Meta-Llama-3-8B-Instruct", src="models", accept_token=True)

if __name__ == "__main__":
    demo.launch()
@@ -89,11 +89,40 @@ def load(
 import gradio as gr

 with gr.Blocks(fill_height=True) as demo:
-    textbox = gr.Textbox(
-        type="password",
-        label="Token",
-        info="Enter your token and press enter.",
+    with gr.Accordion("Enter your token and press enter") as accordion:
+        textbox = gr.Textbox(
+            type="password",
+            show_label=False,
+            container=False,
+        )
+        remember_token = gr.Checkbox(
+            label="Remember me on this device", value=False, container=False
+        )
+        browser_state = gr.BrowserState()
+
+    @gr.on([textbox.submit], outputs=accordion)
+    def hide_accordion():
+        return gr.Accordion("Token settings", open=False)
+
+    @gr.on(
+        [textbox.submit, remember_token.change],
+        inputs=[textbox, remember_token],
+        outputs=[browser_state, remember_token],
+    )
+    def save_token(token_value, remember_token_value):
+        if remember_token_value and token_value:
+            return token_value, gr.Checkbox(
+                label="Remember me on this device (saved!)", value=True
+            )
+        else:
+            return "", gr.Checkbox(label="Remember me on this device")
+
+    @gr.on(demo.load, inputs=[browser_state], outputs=[textbox, remember_token])
+    def load_token(token_value):
+        if token_value:
+            return token_value, True
+        else:
+            return "", False
+
     @gr.render(inputs=[textbox], triggers=[textbox.submit])
     def create(token_value):