Switch to pyproject.toml for specifying dependencies #2052

Closed · wants to merge 7 commits

1 change: 1 addition & 0 deletions codecov.yml
@@ -2,6 +2,7 @@ ignore:
- "examples"
- "tests"
- "tools"
- "setup.py"

#codecov:
# notify:
@@ -301,7 +301,7 @@ def _mobilenet_v3_model(
last_channel: int,
pretrained: bool,
progress: bool,
**kwargs: Any
**kwargs: Any,
):
model = MobileNetV3(inverted_residual_setting, last_channel, **kwargs)
if pretrained:
@@ -147,7 +147,7 @@ def __init__(
base_lr: float,
num_epochs: float,
warmup_epochs: float = 0,
warmup_lr: float = 3.4e-4
warmup_lr: float = 3.4e-4,
):
super().__init__(optimizer, num_steps_in_epoch)
self._base_lr = base_lr
93 changes: 93 additions & 0 deletions pyproject.toml
@@ -0,0 +1,93 @@
[project]
name = "nncf"
authors = [
{ name = "Intel Corporation", email = "[email protected]" },
]
description = "Neural Networks Compression Framework"
readme = "README.md"
requires-python = ">=3.7"
keywords = [
"compression",
"quantization",
"sparsity",
"mixed-precision-training",
"quantization-aware-training",
"hawq",
"classification",
"pruning",
"object-detection",
"semantic-segmentation",
"nas",
"nlp",
"bert",
"transformers",
"mmdetection"
]
license = { text = "Apache-2.0" }
classifiers = [
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Intended Audience :: Information Technology",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
"Topic :: Software Development :: Libraries :: Application Frameworks",
"Topic :: Software Development :: Libraries :: Python Modules"
]
dependencies = [
"ninja>=1.10.0.post2, <1.11",
"texttable>=1.6.3",
"scipy>=1.3.2, <1.11",
"networkx>=2.6, <=2.8.2", # see ticket 94048 or https://github.com/networkx/networkx/issues/5962
"numpy>=1.19.1, <1.25",
# The recent pyparsing major version update seems to break
# integration with networkx - the graphs parsed from current .dot
# reference files no longer match against the graphs produced in tests.
# Using 2.x versions of pyparsing seems to fix the issue.
# Ticket: 69520
"packaging>=20.0",
"pyparsing<3.0",
"pymoo @ git+https://github.com/anyoptimization/pymoo.git@695cb26923903f872c7256a9013609769f3cc2bd",
"jsonschema>=3.2.0",
"pydot>=1.4.1",
"jstyleson>=0.0.2",
"tqdm>=4.54.1",
"natsort>=7.1.0",
"pandas>=1.1.5,<2.1",
"scikit-learn>=0.24.0",
"openvino-telemetry>=2023.1.0",
"psutil"
]
dynamic = ["version"]

[project.urls]
homepage = "https://openvinotoolkit.github.io/nncf/"
repository = "https://github.com/openvinotoolkit/nncf"

[project.optional-dependencies]
torch = ["torch>=1.13.0,<2.1;python_version < '3.11'"]
tf = ["tensorflow~=2.12.0", "tensorflow-metadata<=1.13.0"]
onnx = ["onnx~=1.13.1", "onnxruntime~=1.14.1;python_version < '3.11'"]
openvino = ["openvino==2023.0.1"]
dev = [
"black==23.3.0",
"isort==5.12.0",
"kaleido>=0.2.1",
"matplotlib>=3.3.4, <3.6",
"pillow>=9.0.0",
"plotly-express>=0.4.1",
"pre-commit==3.2.2"
]
tests = ["pytest"]
docs = []
tensorflow = ["nncf[tf]"]
tensorflow2 = ["nncf[tf]"]
pytorch = ["nncf[torch]"]
all = ["nncf[torch,tf,openvino,onnx]"]

[tool.setuptools.packages.find]
where = ["nncf"]

[build-system]
requires = ["setuptools>=59.5"]
1 change: 0 additions & 1 deletion requirements.txt

This file was deleted.
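
With requirements.txt removed, anything that previously ran pip install -r requirements.txt would presumably install the same core dependency set from the project metadata instead; a minimal sketch, assuming a local checkout or a release built from this metadata:

    pip install .       # local checkout: core dependencies from pyproject.toml
    pip install nncf    # published release carrying the same metadata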

141 changes: 9 additions & 132 deletions setup.py
@@ -20,16 +20,19 @@
# +-------------------------------------+---------------------------------------------+
# | python setup.py install | pip install . |
# | python setup.py develop | pip install -e . |
# | python setup.py develop --*arg* | pip install --install-option="*arg*" -e . |
# | python setup.py develop --*arg* | pip install --install-option="--*arg*" -e .| <-- removed since pip 23.x
# | python setup.py sdist | python -m build -s | <-- using the "build" package
# | python setup.py bdist_wheel | python -m build -w | <-- pypi.org/project/build/
# | python setup.py bdist_wheel --*arg* | python -m build -w -C--global-option=--*arg*|
# +-------------------------------------+---------------------------------------------+
#
# PyPA in general recommends to move away from setup.py and use pyproject.toml
# instead. This doesn't fit us as we currently want to do custom stuff during
# installation such as setting version based on the commit SHA for repo-based
# installs.
# The majority of the usual setup.py-related metadata is now in the pyproject.toml,
# but the PSF/PyPA tooling still provides no way to specify custom, dynamic versions
# via pyproject.toml alone.
# Obscure PR discussions such as https://github.com/pypa/setuptools/pull/3885
# reveal that setup.py can still be kept and will still be invoked alongside pyproject.toml, so
# it is currently used solely for setting the version based on the commit SHA
# for repo-based installs.


import codecs
@@ -40,28 +40,9 @@
import sys
import sysconfig

import setuptools
from pkg_resources import parse_version
from setuptools import find_packages
from setuptools import setup

here = os.path.abspath(os.path.dirname(__file__))
BKC_SETUPTOOLS_VERSION = "59.5.0"

setuptools_version = parse_version(setuptools.__version__).base_version
if setuptools_version < "43.0.0":
raise RuntimeError(
"To properly install NNCF, please install setuptools>=43.0.0, "
f"while current setuptools version is {setuptools.__version__}. "
f"Recommended version is {BKC_SETUPTOOLS_VERSION}."
)

python_version = sys.version_info
if python_version < (3, 7, 0):
print("Only Python >= 3.7.0 is supported")
sys.exit(0)

version_string = "{}{}".format(sys.version_info[0], sys.version_info[1])

is_installing_editable = "develop" in sys.argv
is_building_release = not is_installing_editable and "--release" in sys.argv
@@ -100,114 +84,7 @@ def find_version(*file_paths):
return version_value


INSTALL_REQUIRES = [
"jsonschema>=3.2.0",
"jstyleson>=0.0.2",
"natsort>=7.1.0",
"networkx>=2.6, <=2.8.2", # see ticket 94048 or https://github.com/networkx/networkx/issues/5962
"ninja>=1.10.0.post2, <1.11",
"numpy>=1.19.1, <1.25",
"openvino-telemetry>=2023.1.1",
"packaging>=20.0",
"pandas>=1.1.5,<2.1",
"psutil",
"pydot>=1.4.1",
"pymoo @ git+https://github.com/anyoptimization/pymoo.git@695cb26923903f872c7256a9013609769f3cc2bd",
# The recent pyparsing major version update seems to break
# integration with networkx - the graphs parsed from current .dot
# reference files no longer match against the graphs produced in tests.
# Using 2.x versions of pyparsing seems to fix the issue.
# Ticket: 69520
"pyparsing<3.0",
"scikit-learn>=0.24.0",
"scipy>=1.3.2, <1.11",
"texttable>=1.6.3",
"tqdm>=4.54.1",
]


TF_EXTRAS = [
"tensorflow~=2.12.0",
# This is required for support of TF 2.8.4 which needs protobuf<=3.19.6
"tensorflow-metadata<=1.13.0",
]

TORCH_EXTRAS = [
"torch>=1.13.0,<2.1;python_version < '3.11'",
]

ONNX_EXTRAS = ["onnx~=1.13.1", "onnxruntime~=1.14.1;python_version < '3.11'"]

OPENVINO_EXTRAS = ["openvino==2023.0.1"]


EXTRAS_REQUIRE = {
"dev": [
"black==23.3.0",
"isort==5.12.0",
"kaleido>=0.2.1",
"matplotlib>=3.3.4, <3.6",
"pillow>=9.0.0",
"plotly-express>=0.4.1",
"pre-commit==3.2.2",
],
"tests": ["pytest"],
"docs": [],
"tf": TF_EXTRAS,
"tensorflow": TF_EXTRAS,
"tensorflow2": TF_EXTRAS,
"torch": TORCH_EXTRAS,
"pytorch": TORCH_EXTRAS,
"onnx": ONNX_EXTRAS,
"openvino": OPENVINO_EXTRAS,
"all": [
TF_EXTRAS,
TORCH_EXTRAS,
ONNX_EXTRAS,
OPENVINO_EXTRAS,
],
}

with open("{}/README.md".format(here), "r", encoding="utf8") as fh:
long_description = fh.read()

setup(
name="nncf",
version=find_version(os.path.join(here, "nncf/version.py")),
author="Intel",
author_email="[email protected]",
description="Neural Networks Compression Framework",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/openvinotoolkit/nncf",
license="Apache-2.0",
packages=find_packages(exclude=["tests", "tests.*", "examples", "examples.*", "tools", "tools.*"]),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
install_requires=INSTALL_REQUIRES,
extras_require=EXTRAS_REQUIRE,
keywords=[
"compression",
"quantization",
"sparsity",
"mixed-precision-training",
"quantization-aware-training",
"hawq",
"classification",
"pruning",
"object-detection",
"semantic-segmentation",
"nas",
"nlp",
"bert",
"transformers",
"mmdetection",
],
include_package_data=True,
)
setup(version=find_version(os.path.join(here, "nncf/version.py")))

path_to_ninja = glob.glob(str(sysconfig.get_paths()["purelib"] + "/ninja*/ninja/data/bin/"))
if path_to_ninja:
10 changes: 5 additions & 5 deletions tests/experimental/tensorflow/test_models/resnet.py
@@ -60,7 +60,7 @@ def __init__(
activation="relu",
gating_activation="sigmoid",
round_down_protect=True,
**kwargs
**kwargs,
):
"""Initializes a squeeze and excitation layer.

@@ -291,7 +291,7 @@ def __init__(
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
**kwargs
**kwargs,
):
"""Initializes a residual block with BN after convolutions.

@@ -490,7 +490,7 @@ def __init__(
norm_momentum=0.99,
norm_epsilon=0.001,
bn_trainable=True,
**kwargs
**kwargs,
):
"""Initializes a standard bottleneck block with BN after convolutions.

@@ -798,7 +798,7 @@ def __init__(
kernel_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bias_regularizer: Optional[tf.keras.regularizers.Regularizer] = None,
bn_trainable: bool = True,
**kwargs
**kwargs,
):
"""Initializes a ResNet model.

@@ -1066,7 +1066,7 @@ def __init__(
norm_momentum: float = 0.99,
norm_epsilon: float = 0.001,
skip_logits_layer: bool = False,
**kwargs
**kwargs,
):
"""Classification initialization function.
