Mirror of https://github.com/gradio-app/gradio.git, synced 2025-01-06 10:25:17 +08:00, at commit 6e6121a1ac
Python / 21 lines / 806 B
import gradio as gr
import os

# save your HF API token from https://hf.co/settings/tokens as an env variable to avoid rate limiting
auth_token = os.getenv("auth_token")

# load a model from https://hf.co/models as an interface, then use it as an api
# you can remove the api_key parameter if you don't care about rate limiting.
api = gr.load("huggingface/EleutherAI/gpt-j-6B", api_key=auth_token)


def complete_with_gpt(text):
    # feed the last 50 characters to the model and append its completion to the untouched prefix
    return text[:-50] + api(text[-50:])


with gr.Blocks() as demo:
    textbox = gr.Textbox(placeholder="Type here...", lines=4)
    btn = gr.Button("Autocomplete")

    # define what runs when the button is clicked; the textbox is used as both an input and an output
    btn.click(fn=complete_with_gpt, inputs=textbox, outputs=textbox, queue=False)

demo.launch()
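
Note on how api is used above: the object returned by gr.load("huggingface/EleutherAI/gpt-j-6B", ...) behaves like a plain Python function, which is what lets complete_with_gpt call it directly. Below is a minimal standalone sketch of that behavior, not part of the demo file itself; the prompt string is only an illustration, and the auth_token environment variable is assumed to be set as in the demo.

import os

import gradio as gr

# sketch only (not part of the demo above): call the loaded interface directly as a function
api = gr.load("huggingface/EleutherAI/gpt-j-6B", api_key=os.getenv("auth_token"))
completion = api("The sea was calm that night, and")  # returns the model's text output as a string
print(completion)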