Mirror of https://github.com/gradio-app/gradio.git (synced 2025-03-31 12:20:26 +08:00)
switch from black to ruff formatter (#6543)
* migrate from black to ruff
* fix script and dependencies
* applying ruff
* add changeset
* add changeset
* address ruff feedback
* replace linter
* fixed typing
* fix typing

Co-authored-by: Abubakar Abid <abubakar@huggingface.co>
Co-authored-by: gradio-pr-bot <gradio-pr-bot@users.noreply.github.com>
parent 5d5ab8c73c
commit 8a70e83db9
.changeset/wise-feet-fold.md (new file, +6)
@@ -0,0 +1,6 @@
+---
+"gradio": patch
+"gradio_client": patch
+---
+
+feat:switch from black to ruff formatter
@@ -780,7 +780,10 @@ class Client:
             )
         if is_private:
             huggingface_hub.add_space_secret(
-                space_id, "HF_TOKEN", hf_token, token=hf_token  # type: ignore
+                space_id,
+                "HF_TOKEN",
+                hf_token,  # type: ignore
+                token=hf_token,
             )
 
         url = f"https://huggingface.co/spaces/{space_id}"
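Note: exploding the call onto one argument per line also narrows the trailing "# type: ignore". Pyright and mypy apply "# type: ignore" to the physical line it sits on, so after the reformat the suppression covers only the hf_token argument rather than the whole call. A minimal runnable sketch of the idea, with hypothetical names standing in for the huggingface_hub API (not taken from this commit):

    from typing import Optional

    def add_secret(space_id: str, key: str, value: str, token: Optional[str]) -> None:
        # stand-in for huggingface_hub.add_space_secret
        print(f"setting {key} on {space_id}")

    maybe_token: Optional[str] = "hf_xxx"  # the checker treats this as possibly None

    add_secret(
        "user/space",
        "HF_TOKEN",
        maybe_token,  # type: ignore  # suppresses the Optional-vs-str error here only
        token=maybe_token,
    )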
@@ -134,7 +134,7 @@ def document_fn(fn: Callable, cls) -> tuple[str, list[dict], dict, str | None]:
                 del parameters[param_name]
             if param.default != inspect.Parameter.empty:
                 default = param.default
-                if type(default) == str:
+                if isinstance(default, str):
                     default = '"' + default + '"'
                 if default.__class__.__module__ != "builtins":
                     default = f"{default.__class__.__name__}()"
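Note: this substitution recurs throughout the commit and comes from ruff's E721 rule ("do not compare types; for instance checks use isinstance()"). A short runnable illustration (not from the commit) of the semantic difference — isinstance also accepts subclasses, which the equality check does not:

    class UserString(str):
        pass

    s = UserString("hello")

    print(type(s) == str)      # False: exact-type comparison ignores inheritance
    print(isinstance(s, str))  # True: isinstance matches subclasses too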
@@ -2,9 +2,9 @@
 
 cd "$(dirname ${0})/.."
 
-echo "Formatting the client library.. Our style follows the Black code style."
+echo "Formatting the client library.. Our style follows the ruff code style."
 python -m ruff --fix .
-python -m black .
+python -m ruff format .
 
 echo "Type checking the client library with pyright"
 python -m pyright gradio_client/*.py
@@ -4,7 +4,7 @@ cd "$(dirname ${0})/.."
 
 echo "Linting..."
 python -m ruff test gradio_client
-python -m black --check test gradio_client
+python -m ruff format --check test gradio_client
 
 echo "Type checking the client library with pyright"
 python -m pyright gradio_client/*.py
@@ -1,7 +1,6 @@
-black==23.3.0
 pytest-asyncio
 pytest==7.1.2
-ruff==0.0.264
+ruff==0.1.7
 pyright==1.1.327
 gradio
 pydub==0.25.1
@@ -109,7 +109,7 @@ def test_strip_invalid_filename_characters(orig_filename, new_filename):
 
 class AsyncMock(MagicMock):
     async def __call__(self, *args, **kwargs):
-        return super(AsyncMock, self).__call__(*args, **kwargs)
+        return super().__call__(*args, **kwargs)
 
 
 @pytest.mark.asyncio
@@ -1311,7 +1311,7 @@ Received inputs:
 
         dep_outputs = dependency["outputs"]
 
-        if type(predictions) is not list and type(predictions) is not tuple:
+        if not isinstance(predictions, (list, tuple)):
             predictions = [predictions]
 
         if len(predictions) < len(dep_outputs):
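Note: isinstance also accepts a tuple of types, which is what lets the two chained exact-type checks collapse into one call here. A runnable sketch of the pattern (illustrative, not from the commit):

    def normalize(value):
        # one isinstance call replaces two type(...) is not ... comparisons
        if not isinstance(value, (list, tuple)):
            value = [value]
        return value

    print(normalize(3))       # [3]
    print(normalize([1, 2]))  # [1, 2]
    print(normalize((1, 2)))  # (1, 2)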
@@ -1349,7 +1349,7 @@ Received outputs:
         dependency = self.dependencies[fn_index]
         batch = dependency["batch"]
 
-        if type(predictions) is dict and len(predictions) > 0:
+        if isinstance(predictions, dict) and len(predictions) > 0:
             predictions = convert_component_dict_to_list(
                 dependency["outputs"], predictions
             )
@@ -1418,7 +1418,11 @@ Received outputs:
                     f"{block.__class__} Component with id {output_id} not a valid output component."
                 )
             prediction_value = block.postprocess(prediction_value)
-            outputs_cached = processing_utils.move_files_to_cache(prediction_value, block, postprocess=True)  # type: ignore
+            outputs_cached = processing_utils.move_files_to_cache(
+                prediction_value,
+                block,  # type: ignore
+                postprocess=True,
+            )
             output.append(outputs_cached)
 
         return output
@@ -126,7 +126,8 @@ class ChatInterface(Blocks):
             if not isinstance(additional_inputs, list):
                 additional_inputs = [additional_inputs]
             self.additional_inputs = [
-                get_component_instance(i) for i in additional_inputs  # type: ignore
+                get_component_instance(i)
+                for i in additional_inputs  # type: ignore
             ]
         else:
             self.additional_inputs = []
@@ -37,7 +37,9 @@ def _build(
     pyproject_toml = parse((path / "pyproject.toml").read_text())
     if bump_version:
         pyproject_toml = parse((path / "pyproject.toml").read_text())
-        version = semantic_version.Version(pyproject_toml["project"]["version"]).next_patch()  # type: ignore
+        version = semantic_version.Version(
+            pyproject_toml["project"]["version"]  # type: ignore
+        ).next_patch()
         live.update(
             f":1234: Using version [bold][magenta]{version}[/][/]. "
             "Set [bold][magenta]--no-bump-version[/][/] to use the version in pyproject.toml file."
@@ -139,7 +139,9 @@ def _create(
         print(
             f":snake: Using requires-python of [bold][magenta]{requires_python}[/][/]"
         )
-        pyproject_toml["project"]["requires-python"] = requires_python or ">=3.8"  # type: ignore
+        pyproject_toml["project"]["requires-python"] = (  # type: ignore
+            requires_python or ">=3.8"
+        )
 
     keywords = []
     print(
@@ -18,8 +18,7 @@ def _dev(
         typer.Argument(
             help="The path to the app. By default, looks for demo/app.py in the current directory."
         ),
-    ] = Path("demo")
-    / "app.py",
+    ] = Path("demo") / "app.py",
     component_directory: Annotated[
         Path,
         typer.Option(
@@ -89,8 +89,7 @@ def _publish(
     dist_dir: Annotated[
         Path,
         Argument(help=f"Path to the wheel directory. Default is {Path('.') / 'dist'}"),
-    ] = Path(".")
-    / "dist",
+    ] = Path(".") / "dist",
     bump_version: Annotated[
         bool, Option(help="Whether to bump the version number.")
     ] = True,
@@ -275,7 +275,7 @@ class Dataframe(Component):
     def __process_counts(count, default=3) -> tuple[int, str]:
         if count is None:
             return (default, "dynamic")
-        if type(count) == int or type(count) == float:
+        if isinstance(count, (int, float)):
             return (int(count), "dynamic")
         else:
             return count
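Caveat (not stated in the commit): isinstance(count, (int, float)) is slightly broader than type(count) == int or type(count) == float, because bool is a subclass of int in Python. Booleans now pass the check where they previously did not:

    print(type(True) == int)               # False under the old exact-type check
    print(isinstance(True, (int, float)))  # True under the new isinstance check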
@@ -415,9 +415,16 @@ def on(
     if Context.root_block is None:
         raise Exception("Cannot call on() outside of a gradio.Blocks context.")
     if triggers is None:
-        triggers = [EventListenerMethod(input, "change") for input in inputs] if inputs is not None else []  # type: ignore
+        triggers = (
+            [EventListenerMethod(input, "change") for input in inputs]
+            if inputs is not None
+            else []
+        )  # type: ignore
     else:
-        triggers = [EventListenerMethod(t.__self__ if t.has_trigger else None, t.event_name) for t in triggers]  # type: ignore
+        triggers = [
+            EventListenerMethod(t.__self__ if t.has_trigger else None, t.event_name)
+            for t in triggers
+        ]  # type: ignore
     dep, dep_index = Context.root_block.set_event_trigger(
         triggers,
         fn,
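Note: for assignments whose right-hand side is a long conditional expression, the formatter wraps the expression in parentheses and stacks it one clause per line rather than leaving a single over-long line. A runnable sketch of the resulting shape (illustrative names, not from the commit):

    inputs = ["textbox_a", "textbox_b"]
    triggers = (
        [(name, "change") for name in inputs]
        if inputs is not None
        else []
    )
    print(triggers)  # [('textbox_a', 'change'), ('textbox_b', 'change')]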
@@ -189,8 +189,14 @@ def from_model(model_name: str, hf_token: str | None, alias: str | None, **kwargs):
             "postprocess": lambda r: r.json()["text"],
         },
         "conversational": {
-            "inputs": [components.Textbox(render=False), components.State(render=False)],  # type: ignore
-            "outputs": [components.Chatbot(render=False), components.State(render=False)],  # type: ignore
+            "inputs": [
+                components.Textbox(render=False),
+                components.State(render=False),
+            ],  # type: ignore
+            "outputs": [
+                components.Chatbot(render=False),
+                components.State(render=False),
+            ],  # type: ignore
             "preprocess": chatbot_preprocess,
             "postprocess": chatbot_postprocess,
         },
@@ -570,8 +576,8 @@ def from_spaces_interface(
         len(config["outputs"]) == 1
     ):  # if the fn is supposed to return a single value, pop it
         output = output[0]
-    if len(config["outputs"]) == 1 and isinstance(
-        output, list
+    if (
+        len(config["outputs"]) == 1 and isinstance(output, list)
     ):  # Needed to support Output.Image() returning bounding boxes as well (TODO: handle different versions of gradio since they have slightly different APIs)
         output = output[0]
     return output

@@ -48,7 +48,7 @@ def get_tabular_examples(model_name: str) -> Dict[str, List[float]]:
 
 
 def cols_to_rows(
-    example_data: Dict[str, List[float]]
+    example_data: Dict[str, List[float]],
 ) -> Tuple[List[str], List[List[float]]]:
     headers = list(example_data.keys())
     n_rows = max(len(example_data[header] or []) for header in headers)

@@ -439,9 +439,7 @@ class HuggingFaceDatasetSaver(FlaggingCallback):
             if deserialized:
                 path_in_repo = str(  # returned filepath is absolute, we want it relative to compute URL
                     Path(deserialized).relative_to(self.dataset_dir)
-                ).replace(
-                    "\\", "/"
-                )
+                ).replace("\\", "/")
                 row.append(
                     huggingface_hub.hf_hub_url(
                         repo_id=self.dataset_id,

@@ -756,8 +756,7 @@ def special_args(
             if inputs is not None:
                 inputs.insert(i, request)
         elif (
-            type_hint == Optional[oauth.OAuthProfile]
-            or type_hint == oauth.OAuthProfile
+            type_hint == Optional[oauth.OAuthProfile] or type_hint == oauth.OAuthProfile
             # Note: "OAuthProfile | None" is equals to Optional[OAuthProfile] in Python
             # => it is automatically handled as well by the above condition
             # (adding explicit "OAuthProfile | None" would break in Python3.9)

@@ -29,12 +29,18 @@ def format_image(
     elif type == "filepath":
         try:
             path = processing_utils.save_pil_to_cache(
-                im, cache_dir=cache_dir, name=name, format=fmt or format  # type: ignore
+                im,
+                cache_dir=cache_dir,
+                name=name,
+                format=fmt or format,  # type: ignore
             )
         # Catch error if format is not supported by PIL
         except (KeyError, ValueError):
             path = processing_utils.save_pil_to_cache(
-                im, cache_dir=cache_dir, name=name, format="png"  # type: ignore
+                im,
+                cache_dir=cache_dir,
+                name=name,
+                format="png",  # type: ignore
             )
         return path
     else:
@@ -54,7 +60,9 @@ def save_image(y: np.ndarray | _Image.Image | str | Path, cache_dir: str):
         fmt = y.format
         try:
             path = processing_utils.save_pil_to_cache(
-                y, cache_dir=cache_dir, format=fmt  # type: ignore
+                y,
+                cache_dir=cache_dir,
+                format=fmt,  # type: ignore
             )
         # Catch error if format is not supported by PIL
         except (KeyError, ValueError):
@@ -208,10 +208,12 @@ class Interface(Blocks):
             self.cache_examples = False
 
         self.input_components = [
-            get_component_instance(i, unrender=True) for i in inputs  # type: ignore
+            get_component_instance(i, unrender=True)
+            for i in inputs  # type: ignore
        ]
         self.output_components = [
-            get_component_instance(o, unrender=True) for o in outputs  # type: ignore
+            get_component_instance(o, unrender=True)
+            for o in outputs  # type: ignore
         ]
 
         for component in self.input_components + self.output_components:
@@ -355,7 +357,8 @@ class Interface(Blocks):
             pass
         else:
             self.flagging_callback.setup(
-                self.input_components + self.output_components, self.flagging_dir  # type: ignore
+                self.input_components + self.output_components,
+                self.flagging_dir,  # type: ignore
             )
 
         # Render the Gradio UI
@@ -649,9 +652,7 @@ class Interface(Blocks):
         clear_btn.click(
             None,
             [],
-            (
-                [input_component_column] if input_component_column else []
-            ),  # type: ignore
+            ([input_component_column] if input_component_column else []),  # type: ignore
             js=f"""() => {json.dumps(
                 [{'variant': None, 'visible': True, '__type__': 'update'}]
@@ -192,7 +192,8 @@ def load_from_pipeline(pipeline: pipelines.base.Pipeline) -> dict:
             "postprocess": lambda r: {i["answer"]: i["score"] for i in r},
         }
     elif hasattr(transformers, "ImageToTextPipeline") and isinstance(
-        pipeline, pipelines.image_to_text.ImageToTextPipeline  # type: ignore
+        pipeline,
+        pipelines.image_to_text.ImageToTextPipeline,  # type: ignore
     ):
         pipeline_info = {
             "inputs": components.Image(
@@ -597,7 +597,9 @@ def _convert(image, dtype, force_copy=False, uniform=False):
         image_out = np.multiply(image, imax_out, dtype=computation_type)  # type: ignore
     else:
         image_out = np.multiply(
-            image, (imax_out - imin_out) / 2, dtype=computation_type  # type: ignore
+            image,
+            (imax_out - imin_out) / 2,  # type: ignore
+            dtype=computation_type,
         )
         image_out -= 1.0 / 2.0
         np.rint(image_out, out=image_out)
@@ -607,7 +609,9 @@ def _convert(image, dtype, force_copy=False, uniform=False):
         np.clip(image_out, 0, imax_out, out=image_out)  # type: ignore
     else:
         image_out = np.multiply(
-            image, (imax_out - imin_out + 1.0) / 2.0, dtype=computation_type  # type: ignore
+            image,
+            (imax_out - imin_out + 1.0) / 2.0,  # type: ignore
+            dtype=computation_type,
         )
         np.floor(image_out, out=image_out)
         np.clip(image_out, imin_out, imax_out, out=image_out)  # type: ignore
@@ -383,7 +383,9 @@ class GradioMultiPartParser:
                 message_bytes = data[start:end]
                 if self.upload_progress is not None:
                     self.upload_progress.update(
-                        self.upload_id, self._current_part.file.filename, message_bytes  # type: ignore
+                        self.upload_id,  # type: ignore
+                        self._current_part.file.filename,  # type: ignore
+                        message_bytes,
                     )
                 if self._current_part.file is None:
                     self._current_part.data += message_bytes
@@ -463,7 +465,7 @@ class GradioMultiPartParser:
         # Parse the Content-Type header to get the multipart boundary.
         _, params = parse_options_header(self.headers["Content-Type"])
         charset = params.get(b"charset", "utf-8")
-        if type(charset) == bytes:
+        if isinstance(charset, bytes):
             charset = charset.decode("latin-1")
         self._charset = charset
         try:
@@ -301,8 +301,7 @@ class Microphone(components.Audio):
         self,
         value: str | tuple[int, np.ndarray] | Callable | None = None,
         *,
-        sources: list[Literal["upload", "microphone"]]
-        | None = ["microphone"],  # noqa: B006
+        sources: list[Literal["upload", "microphone"]] | None = ["microphone"],  # noqa: B006
         type: Literal["numpy", "filepath"] = "numpy",
         label: str | None = None,
         show_label: bool = True,
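Aside (not part of this change): the ["microphone"] default that survives the reflow is a mutable default argument, which ruff's B006 rule normally flags because the list is created once at function definition time and shared across calls — hence the deliberate "# noqa: B006" opt-out. For contrast, the usual safe pattern looks like this (runnable sketch, hypothetical names):

    def add_source(source, sources=None):
        if sources is None:
            sources = []  # a fresh list on every call, never shared
        sources.append(source)
        return sources

    print(add_source("microphone"))  # ['microphone']
    print(add_source("upload"))      # ['upload'], not ['microphone', 'upload']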
@@ -328,17 +328,13 @@ class Base(ThemeClass):
         text_size: sizes.Size | str = sizes.text_md,
         spacing_size: sizes.Size | str = sizes.spacing_md,
         radius_size: sizes.Size | str = sizes.radius_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("Source Sans Pro"),
             "ui-sans-serif",
             "system-ui",
             "sans-serif",
         ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("IBM Plex Mono"),
             "ui-monospace",
             "Consolas",
@@ -708,56 +708,40 @@ with gr.Blocks(theme=theme) as demo:
             3 + 3 * len(palette_range) : 6 + 3 * len(palette_range)
         ]
         text_sizes = args[
-            6
-            + 3 * len(palette_range) : 6
+            6 + 3 * len(palette_range) : 6
             + 3 * len(palette_range)
             + len(size_range)
         ]
         spacing_sizes = args[
-            6
-            + 3 * len(palette_range)
-            + len(size_range) : 6
+            6 + 3 * len(palette_range) + len(size_range) : 6
             + 3 * len(palette_range)
             + 2 * len(size_range)
         ]
         radius_sizes = args[
-            6
-            + 3 * len(palette_range)
-            + 2 * len(size_range) : 6
+            6 + 3 * len(palette_range) + 2 * len(size_range) : 6
             + 3 * len(palette_range)
             + 3 * len(size_range)
         ]
         main_fonts = args[
-            6
-            + 3 * len(palette_range)
-            + 3 * len(size_range) : 6
+            6 + 3 * len(palette_range) + 3 * len(size_range) : 6
             + 3 * len(palette_range)
             + 3 * len(size_range)
             + 4
         ]
         main_is_google = args[
-            6
-            + 3 * len(palette_range)
-            + 3 * len(size_range)
-            + 4 : 6
+            6 + 3 * len(palette_range) + 3 * len(size_range) + 4 : 6
             + 3 * len(palette_range)
             + 3 * len(size_range)
             + 8
         ]
         mono_fonts = args[
-            6
-            + 3 * len(palette_range)
-            + 3 * len(size_range)
-            + 8 : 6
+            6 + 3 * len(palette_range) + 3 * len(size_range) + 8 : 6
             + 3 * len(palette_range)
             + 3 * len(size_range)
             + 12
         ]
         mono_is_google = args[
-            6
-            + 3 * len(palette_range)
-            + 3 * len(size_range)
-            + 12 : 6
+            6 + 3 * len(palette_range) + 3 * len(size_range) + 12 : 6
             + 3 * len(palette_range)
             + 3 * len(size_range)
             + 16
@@ -16,17 +16,13 @@ class Default(Base):
         spacing_size: sizes.Size | str = sizes.spacing_md,
         radius_size: sizes.Size | str = sizes.radius_md,
         text_size: sizes.Size | str = sizes.text_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("Source Sans Pro"),
             "ui-sans-serif",
             "system-ui",
             "sans-serif",
         ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("IBM Plex Mono"),
             "ui-monospace",
             "Consolas",
@@ -16,18 +16,14 @@ class Glass(Base):
         spacing_size: sizes.Size | str = sizes.spacing_sm,
         radius_size: sizes.Size | str = sizes.radius_sm,
         text_size: sizes.Size | str = sizes.text_sm,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
             "Optima",
             "Candara",
             "Noto Sans",
             "source-sans-pro",
             "sans-serif",
         ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("IBM Plex Mono"),
             "ui-monospace",
             "Consolas",
@@ -16,17 +16,13 @@ class Monochrome(Base):
         spacing_size: sizes.Size | str = sizes.spacing_lg,
         radius_size: sizes.Size | str = sizes.radius_none,
         text_size: sizes.Size | str = sizes.text_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("Quicksand"),
             "ui-sans-serif",
             "system-ui",
             "sans-serif",
         ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("IBM Plex Mono"),
             "ui-monospace",
             "Consolas",
@@ -16,17 +16,13 @@ class Soft(Base):
         spacing_size: sizes.Size | str = sizes.spacing_md,
         radius_size: sizes.Size | str = sizes.radius_md,
         text_size: sizes.Size | str = sizes.text_md,
-        font: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("Montserrat"),
             "ui-sans-serif",
             "system-ui",
             "sans-serif",
         ),
-        font_mono: fonts.Font
-        | str
-        | Iterable[fonts.Font | str] = (
+        font_mono: fonts.Font | str | Iterable[fonts.Font | str] = (
             fonts.GoogleFont("IBM Plex Mono"),
             "ui-monospace",
             "Consolas",
@@ -720,7 +720,7 @@ def set_task_name(task, session_hash: str, fn_index: int, batch: bool):
 
 
 def get_cancel_function(
-    dependencies: list[dict[str, Any]]
+    dependencies: list[dict[str, Any]],
 ) -> tuple[Callable, list[int]]:
     fn_to_comp = {}
     for dep in dependencies:
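Note: the only change in this hunk is a trailing comma after the lone parameter. Both black and ruff format treat a trailing comma in a split construct as "magic": once present, the parameter list stays expanded one-per-line on every future run instead of being collapsed back onto a single line. A runnable sketch (illustrative names, not from the commit):

    def get_cancel_targets(
        dependencies,  # the trailing comma pins this signature open
    ):
        return [dep for dep in dependencies if dep.get("cancels")]

    print(get_cancel_targets([{"cancels": [1]}, {}]))  # [{'cancels': [1]}]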
@@ -2,10 +2,10 @@
 
 cd "$(dirname ${0})/.."
 
-echo "Formatting the backend... Our style follows the Black code style."
+echo "Formatting the backend... Our style follows the ruff code style."
 python -c "import gradio"
-ruff --fix gradio test
-black gradio test
+python -m ruff --fix gradio test
+python -m ruff format gradio test
 bash scripts/type_check_backend.sh
 
 bash client/python/scripts/format.sh # Call the client library's formatting script
@@ -3,4 +3,4 @@
 cd "$(dirname ${0})/.."
 python -c "import gradio"
 python -m ruff gradio test client
-python -m black --check gradio test client
+python -m ruff format --check gradio test client
@@ -2,7 +2,6 @@
 IPython
 altair
 asyncio
-black
 boto3
 coverage
 fastapi>=0.101.0
@@ -13,7 +12,7 @@ pydantic
 pytest
 pytest-asyncio
 pytest-cov
-ruff>=0.0.260
+ruff>=0.1.7
 respx
 scikit-image
 shap
@@ -18,7 +18,6 @@ attrs==21.4.0
     # pytest
 backcall==0.2.0
     # via ipython
-black==23.3.0
-    # via -r requirements.in
 boto3==1.26.65
     # via -r requirements.in
@@ -33,8 +32,6 @@ certifi==2022.6.15
     # requests
 charset-normalizer==2.1.0
     # via requests
-click==8.1.3
-    # via black
 cloudpickle==2.1.0
     # via shap
 decorator==5.1.1
@@ -88,8 +85,6 @@ markupsafe==2.1.1
     # via jinja2
 matplotlib-inline==0.1.3
     # via ipython
-mypy-extensions==0.4.3
-    # via black
 networkx==2.6.3
     # via scikit-image
 numba==0.55.2
@@ -121,8 +116,6 @@ pandas==1.5.3
     # vega-datasets
 parso==0.8.3
     # via jedi
-pathspec==0.9.0
-    # via black
 pexpect==4.8.0
     # via ipython
 pickleshare==0.7.5
@@ -131,8 +124,6 @@ pillow==9.2.0
     # via
     #   imageio
     #   scikit-image
-platformdirs==2.5.2
-    # via black
 pluggy==1.0.0
     # via pytest
 prompt-toolkit==3.0.30
@@ -175,7 +166,7 @@ requests==2.28.1
     # transformers
 respx==0.19.2
     # via -r requirements.in
-ruff==0.0.264
+ruff==0.1.7
     # via -r requirements.in
 rfc3986[idna2008]==1.5.0
     # via httpx
@@ -211,7 +202,6 @@ tokenizers==0.12.1
     # via transformers
 tomli==2.0.1
     # via
-    #   black
     #   pytest
 toolz==0.12.0
     # via altair
@@ -890,7 +890,7 @@ class TestFile:
 
         file_input = gr.File(type="binary")
         output = file_input.preprocess(x_file)
-        assert type(output) == bytes
+        assert isinstance(output, bytes)
 
         output1 = file_input.postprocess("test/test_files/sample_file.pdf")
         output2 = file_input.postprocess("test/test_files/sample_file.pdf")
@@ -58,11 +58,14 @@ class TestQueueing:
             if queue_size != sizes[-1]:
                 sizes.append(queue_size)
 
-            assert max(sizes) in [
-                2,
-                3,
-                4,
-            ]  # Can be 2 - 4, depending on if the workers have picked up jobs before the queue status is checked
+            assert (
+                max(sizes)
+                in [
+                    2,
+                    3,
+                    4,
+                ]
+            )  # Can be 2 - 4, depending on if the workers have picked up jobs before the queue status is checked
 
             assert min(sizes) == 0
             assert sizes[-1] == 0