# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
[project]
name = "optimum-tpu"
# Version is supplied at build time by setuptools_scm, not hard-coded here.
dynamic = ["version"]
authors = [
  { name = "HuggingFace Inc. Machine Learning Optimization Team", email = "[email protected]" },
]
description = "Optimum TPU is the interface between the Hugging Face Transformers library and Google Cloud TPU devices."
readme = "README.md"
license = { file = "LICENSE" }
classifiers = [
  "Development Status :: 2 - Pre-Alpha",
  "License :: OSI Approved :: Apache Software License",
  "Intended Audience :: Developers",
  "Intended Audience :: Education",
  "Intended Audience :: Science/Research",
  "Operating System :: OS Independent",
  "Programming Language :: Python :: 3.10",
  "Programming Language :: Python :: 3.11",
  "Topic :: Scientific/Engineering :: Artificial Intelligence",
]
keywords = [
  "transformers",
  "fine-tuning",
  "inference",
  "tpu",
  "cloud-tpu",
  "gcp",
  "google-cloud",
]
# Runtime dependencies are pinned exactly; note torch and torch-xla are kept
# on the same release (2.4.0) — presumably they must match, TODO confirm.
dependencies = [
  "loguru == 0.6.0",
  "sentencepiece == 0.2.0",
  "torch == 2.4.0",
  "torch-xla[tpu] == 2.4.0",
  "transformers == 4.41.1",
]
# The presence of this (intentionally empty) table activates setuptools_scm,
# which derives the package version from git metadata — this is what backs
# `dynamic = ["version"]` in [project].
[tool.setuptools_scm]
# PEP 517 build configuration; setuptools_scm >= 8 is required at build time
# for the dynamic versioning above.
[build-system]
requires = ["setuptools>=64", "setuptools_scm>=8"]
build-backend = "setuptools.build_meta"
[project.optional-dependencies]
# Extras, installable as `pip install optimum-tpu[<extra>]`.
tests = ["pytest", "safetensors"]
quality = ["black", "isort", "ruff"]
# Jetstream/Pytorch support is experimental for now, requires installation from fixed commit.
# Pallas is pulled because it will install a compatible version of jax[tpu].
jetstream-pt = [
  "jetstream-pt @ git+https://github.com/google/jetstream-pytorch.git@ec4ac8f6b180ade059a2284b8b7d843b3cab0921",
  "torch-xla[pallas] == 2.4.0",
]
# Project links rendered on the PyPI page.
[project.urls]
Homepage = "https://hf.co/hardware"
Documentation = "https://hf.co/docs/optimum/tpu"
Repository = "https://github.com/huggingface/optimum-tpu"
Issues = "https://github.com/huggingface/optimum-tpu/issues"
# Restrict package discovery to the `optimum.tpu` namespace so nothing else
# in the repository is shipped in the wheel.
[tool.setuptools.packages.find]
include = ["optimum.tpu*"]
[tool.black]
line-length = 119
# Target the Python versions this project actually supports (the [project]
# classifiers declare 3.10 and 3.11 only). The previous value, 'py38',
# made black format for a version the package does not support.
target-version = ['py310', 'py311']
extend-exclude = '.ipynb'
[tool.ruff]
line-length = 119

# Lint rule selection. Equivalent to the previous dotted `lint.*` keys,
# written as explicit sub-tables for readability.
[tool.ruff.lint]
select = ["C", "E", "F", "I", "W"]
# Never enforce `E501` (line length violations).
ignore = ["C901", "E501", "E741", "W605"]

# Ignore import violations in all `__init__.py` files.
[tool.ruff.lint.per-file-ignores]
"__init__.py" = ["E402", "F401", "F403", "F811"]

[tool.ruff.lint.isort]
lines-after-imports = 2
known-first-party = ["optimum.tpu"]
[tool.pytest.ini_options]
# Registered custom markers; registering them here keeps pytest from
# emitting unknown-marker warnings.
markers = [
    "is_staging_test",
]