Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

define Yapf config #5591

Merged
merged 8 commits on Jan 28, 2021 (source and target branch names not captured in this page extract)
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments; loading was retried.
Jump to
Jump to file
Failed to load files; loading was retried.
Diff view
Diff view
30 changes: 15 additions & 15 deletions .github/workflows/code-formatting.yml
Original file line number Diff line number Diff line change
Expand Up @@ -22,21 +22,21 @@ jobs:
- name: Run isort
run: isort --settings-path=./pyproject.toml --check-only --diff .

#code-black:
# name: Check code formatting with Black
# runs-on: ubuntu-20.04
# steps:
# - name: Checkout
# uses: actions/checkout@v2
# - name: Set up Python 3.8
# uses: actions/setup-python@v2
# with:
# python-version: 3.8
# - name: Install Black
# run: pip install black==19.10b0
# - name: Run Black
# run: echo "LGTM"
# run black --skip-string-normalization --config=pyproject.toml --check . # TODO, uncomment
format-check-yapf:
runs-on: ubuntu-20.04
steps:
- uses: actions/checkout@master
- uses: actions/setup-python@v2
with:
python-version: 3.8
- name: Install dependencies
run: |
pip install --upgrade pip
pip install yapf
pip list
shell: bash
- name: yapf
run: yapf --diff --parallel --recursive .

python-pep8:
name: Python formatting PEP8
Expand Down
10 changes: 10 additions & 0 deletions .yapfignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,10 @@
.git/*

# TODO
pl_examples/*

# TODO
pytorch_lightning/*

# TODO
tests/*
1 change: 1 addition & 0 deletions MANIFEST.in
Original file line number Diff line number Diff line change
Expand Up @@ -54,6 +54,7 @@ include pyproject.toml
exclude *.yml
exclude *.yaml
exclude *.jsonnet
exclude .yapfignore

# Exclude pyright config
exclude .pyrightconfig.json
Expand Down
23 changes: 13 additions & 10 deletions benchmarks/test_basic_parity.py
Original file line number Diff line number Diff line change
Expand Up @@ -42,18 +42,21 @@ def assert_parity_absolute(pl_values, pt_values, norm_by: float = 1, max_diff: f


# ParityModuleMNIST runs with num_workers=1
@pytest.mark.parametrize('cls_model,max_diff_speed,max_diff_memory', [
(ParityModuleRNN, 0.05, 0.0),
(ParityModuleMNIST, 0.25, 0.0), # todo: lower this thr
])
@pytest.mark.parametrize(
'cls_model,max_diff_speed,max_diff_memory',
[
(ParityModuleRNN, 0.05, 0.0),
(ParityModuleMNIST, 0.25, 0.0), # todo: lower this thr
]
)
@pytest.mark.skipif(not torch.cuda.is_available(), reason="test requires GPU machine")
def test_pytorch_parity(
tmpdir,
cls_model: LightningModule,
max_diff_speed: float,
max_diff_memory: float,
num_epochs: int = 4,
num_runs: int = 3,
tmpdir,
cls_model: LightningModule,
max_diff_speed: float,
max_diff_memory: float,
num_epochs: int = 4,
num_runs: int = 3,
):
"""
Verify that the same pytorch and lightning models achieve the same results
Expand Down
38 changes: 18 additions & 20 deletions benchmarks/test_sharded_parity.py
Original file line number Diff line number Diff line change
Expand Up @@ -100,8 +100,9 @@ def test_ddp_string_sharded_plugin_correctness_amp_multi_gpu():

@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1',
reason="test should be run outside of pytest")
@pytest.mark.skipif(
not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
@DDPLauncher.run("--accelerator ddp --gpus 2 --precision 32")
def test_ddp_sharded_plugin_correctness_multi_gpu_ddp(tmpdir, args=None):
plugin_parity_test(
Expand All @@ -115,8 +116,9 @@ def test_ddp_sharded_plugin_correctness_multi_gpu_ddp(tmpdir, args=None):

@pytest.mark.skipif(not _FAIRSCALE_AVAILABLE, reason="Fairscale is not available")
@pytest.mark.skipif(torch.cuda.device_count() < 2, reason="test requires multi-GPU machine")
@pytest.mark.skipif(not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1',
reason="test should be run outside of pytest")
@pytest.mark.skipif(
not os.getenv("PL_RUNNING_SPECIAL_TESTS", '0') == '1', reason="test should be run outside of pytest"
)
@DDPLauncher.run("--accelerator ddp --gpus 2 --precision 16")
def test_ddp_sharded_plugin_correctness_amp_multi_gpu_ddp(tmpdir, args=None):
plugin_parity_test(
Expand Down Expand Up @@ -173,6 +175,7 @@ def train_dataloader(self):


class SeedTrainLoaderManualModel(SeedTrainLoaderModel):

def training_step(self, batch, batch_idx, optimizer_idx):
# manual
# access your optimizers with use_pl_optimizer=False. Default is True
Expand Down Expand Up @@ -209,6 +212,7 @@ def automatic_optimization(self) -> bool:


class SeedTrainLoaderMultipleOptimizersModel(SeedTrainLoaderModel):

def training_step(self, batch, batch_idx, optimizer_idx):
output = self.layer(batch)
loss = self.loss(batch, output)
Expand Down Expand Up @@ -247,21 +251,21 @@ def record_ddp_fit_model_stats(trainer, model, use_cuda):

if use_cuda:
torch.cuda.synchronize()
max_memory = torch.cuda.max_memory_allocated() / 2 ** 20
max_memory = torch.cuda.max_memory_allocated() / 2**20

total_time = time.perf_counter() - time_start

return max_memory, total_time


def plugin_parity_test(
model_cls: Type[SeedTrainLoaderModel],
plugin: Union[str, DDPPlugin],
seed: int = 42,
accelerator: str = 'ddp_spawn',
gpus: int = 0,
precision: int = 32,
max_percent_speed_diff: float = 0.1,
model_cls: Type[SeedTrainLoaderModel],
plugin: Union[str, DDPPlugin],
seed: int = 42,
accelerator: str = 'ddp_spawn',
gpus: int = 0,
precision: int = 32,
max_percent_speed_diff: float = 0.1,
):
"""
Ensures that the trained model is identical to the standard DDP implementation.
Expand Down Expand Up @@ -292,11 +296,7 @@ def plugin_parity_test(
accelerator=accelerator,
)

max_memory_ddp, ddp_time = record_ddp_fit_model_stats(
trainer=trainer,
model=ddp_model,
use_cuda=use_cuda
)
max_memory_ddp, ddp_time = record_ddp_fit_model_stats(trainer=trainer, model=ddp_model, use_cuda=use_cuda)

# Reset and train Custom DDP
seed_everything(seed)
Expand All @@ -312,9 +312,7 @@ def plugin_parity_test(
)

max_memory_custom, custom_model_time = record_ddp_fit_model_stats(
trainer=trainer,
model=custom_plugin_model,
use_cuda=use_cuda
trainer=trainer, model=custom_plugin_model, use_cuda=use_cuda
)

# Assert model parameters are identical after fit
Expand Down
20 changes: 12 additions & 8 deletions docs/source/conf.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,8 @@ def _transform_changelog(path_in: str, path_out: str) -> None:
shutil.copy(md, os.path.join(PATH_HERE, FOLDER_GENERATED, os.path.basename(md)))
# copy also the changelog
_transform_changelog(
os.path.join(PATH_ROOT, 'CHANGELOG.md'), os.path.join(PATH_HERE, FOLDER_GENERATED, 'CHANGELOG.md')
os.path.join(PATH_ROOT, 'CHANGELOG.md'),
os.path.join(PATH_HERE, FOLDER_GENERATED, 'CHANGELOG.md'),
)

# -- Project information -----------------------------------------------------
Expand All @@ -86,7 +87,6 @@ def _transform_changelog(path_in: str, path_out: str) -> None:
# The full version, including alpha/beta/rc tags
release = pytorch_lightning.__version__


# -- General configuration ---------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
Expand Down Expand Up @@ -202,7 +202,6 @@ def _transform_changelog(path_in: str, path_out: str) -> None:
#
# html_sidebars = {}


# -- Options for HTMLHelp output ---------------------------------------------

# Output file base name for HTML help builder.
Expand Down Expand Up @@ -235,18 +234,23 @@ def _transform_changelog(path_in: str, path_out: str) -> None:

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, project, project + ' Documentation', [author], 1)
]
man_pages = [(master_doc, project, project + ' Documentation', [author], 1)]

# -- Options for Texinfo output ----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, project, project + ' Documentation', author, project,
'One line description of project.', 'Miscellaneous'),
(
master_doc,
project,
project + ' Documentation',
author,
project,
'One line description of project.',
'Miscellaneous',
),
]

# -- Options for Epub output -------------------------------------------------
Expand Down
1 change: 1 addition & 0 deletions legacy/zero_training.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,7 @@


class RandomDataset(Dataset):

def __init__(self, size, length: int = 100):
self.len = length
self.data = torch.randn(length, size)
Expand Down
18 changes: 17 additions & 1 deletion setup.cfg
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
# limitations under the License.

[tool:pytest]

norecursedirs =
.git
dist
Expand All @@ -32,6 +31,7 @@ markers =
gpus_param_tests
junit_duration_report = call


[coverage:report]
exclude_lines =
pragma: no-cover
Expand All @@ -54,6 +54,7 @@ omit =
pytorch_lightning/utilities/distributed.py
pytorch_lightning/tuner/auto_gpu_select.py


[flake8]
# TODO: this should be 88 or 100 according PEP8
max-line-length = 120
Expand All @@ -70,6 +71,7 @@ ignore =
E231 # missing whitespace after ',', ';', or ':'; for black
W503 # line break before binary operator, need for black


# setup.cfg or tox.ini
[check-manifest]
ignore =
Expand All @@ -78,11 +80,13 @@ ignore =
.github/*
.circleci


[metadata]
license_file = LICENSE
# long_description = file:README.md
# long_description_content_type = text/markdown


[pydocstyle]
convention = pep257
# D104, D107: Ignore missing docstrings in __init__ files and methods.
Expand All @@ -91,6 +95,18 @@ add-ignore = D104,D107,D202
max-line-length = 120


[yapf]
based_on_style = pep8
spaces_before_comment = 2
split_before_logical_operator = true
COLUMN_LIMIT = 120
COALESCE_BRACKETS = true
DEDENT_CLOSING_BRACKETS = true
ALLOW_SPLIT_BEFORE_DICT_VALUE = false
BLANK_LINE_BEFORE_NESTED_CLASS_OR_DEF = true
NO_SPACES_AROUND_SELECTED_BINARY_OPERATORS = false


[mypy]
# Typing tests is low priority, but enabling type checking on the
# untyped test functions (using `--check-untyped-defs`) is still
Expand Down
8 changes: 1 addition & 7 deletions setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,9 +46,7 @@
extras['all'] = extras['dev'] + extras['examples'] # + extras['docs']

# These packages shall be installed only on GPU machines
PACKAGES_GPU_ONLY = (
'horovod',
)
PACKAGES_GPU_ONLY = ['horovod']
# create a version for CPU machines
for ex in ('cpu', 'cpu-extra'):
kw = ex.split('-')[1] if '-' in ex else 'all'
Expand All @@ -70,24 +68,20 @@
download_url='https://github.com/PyTorchLightning/pytorch-lightning',
license=pytorch_lightning.__license__,
packages=find_packages(exclude=['tests', 'tests/*', 'benchmarks', 'legacy', 'legacy/*']),

long_description=_load_readme_description(PATH_ROOT),
long_description_content_type='text/markdown',
include_package_data=True,
zip_safe=False,

keywords=['deep learning', 'pytorch', 'AI'],
python_requires='>=3.6',
setup_requires=[],
install_requires=_load_requirements(PATH_ROOT),
extras_require=extras,

project_urls={
"Bug Tracker": "https://github.com/PyTorchLightning/pytorch-lightning/issues",
"Documentation": "https://pytorch-lightning.rtfd.io/en/latest/",
"Source Code": "https://github.com/PyTorchLightning/pytorch-lightning",
},

classifiers=[
'Environment :: Console',
'Natural Language :: English',
Expand Down