Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

[CodeStyle] use built-in open instead of io.open #46751

Merged
merged 2 commits
Oct 11, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -15,7 +15,6 @@
# limitations under the License.

import copy
import io
import json
import os
import unicodedata
Expand Down Expand Up @@ -464,7 +463,7 @@ def from_pretrained(cls, pretrained_model_name_or_path, *args, **kwargs):
tokenizer_config_file = resolved_vocab_files.pop(
"tokenizer_config_file", None)
if tokenizer_config_file is not None:
with io.open(tokenizer_config_file, encoding="utf-8") as f:
with open(tokenizer_config_file, 'r', encoding="utf-8") as f:
init_kwargs = json.load(f)
else:
init_kwargs = init_configuration
Expand Down Expand Up @@ -527,7 +526,7 @@ def save_pretrained(self, save_directory):
self.tokenizer_config_file)
# init_config is set in metaclass created `__init__`,
tokenizer_config = self.init_config
with io.open(tokenizer_config_file, "w", encoding="utf-8") as f:
with open(tokenizer_config_file, "w", encoding="utf-8") as f:
f.write(json.dumps(tokenizer_config, ensure_ascii=False))

self.save_resources(save_directory)
Expand Down Expand Up @@ -571,7 +570,7 @@ def load_vocabulary(filepath,
Vocab: An instance of `Vocab`.
"""
token_to_idx = {}
with io.open(filepath, 'r', encoding='utf-8') as f:
with open(filepath, 'r', encoding='utf-8') as f:
for index, line in enumerate(f):
token = line.rstrip('\n')
token_to_idx[token] = int(index)
Expand Down
7 changes: 3 additions & 4 deletions tools/codestyle/copyright.hook
Original file line number Diff line number Diff line change
Expand Up @@ -13,7 +13,6 @@
# limitations under the License.

import argparse
import io
import re
import sys
import os
Expand Down Expand Up @@ -67,7 +66,7 @@ RE_SHEBANG = re.compile(r"^[ \t\v]*#[ \t]?\!")
def _check_copyright(path):
head=[]
try:
with open(path) as f:
with open(path, 'r', encoding='utf-8') as f:
Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Without specifying the encoding here, the commit fails on Windows due to an encoding error; see cattidea/paddle-flake8-project#66 (comment)

head = [next(f) for x in range(4)]
except StopIteration:
pass
Expand All @@ -79,7 +78,7 @@ def _check_copyright(path):
return False

def generate_copyright(path, comment_mark):
original_contents = io.open(path, encoding="utf-8").readlines()
original_contents = open(path, 'r', encoding="utf-8").readlines()
head = original_contents[0:4]

insert_line_no=0
Expand All @@ -102,7 +101,7 @@ def generate_copyright(path, comment_mark):
new_contents.extend(original_contents[insert_line_no:])
new_contents="".join(new_contents)

with io.open(path, 'w') as output_file:
with open(path, 'w', encoding='utf-8') as output_file:
output_file.write(new_contents)


Expand Down
13 changes: 6 additions & 7 deletions tools/prune_for_jetson.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import sys
import re
import glob
import io


def find_type_files(cur_dir, file_type, file_list=[]):
Expand Down Expand Up @@ -84,7 +83,7 @@ def prune_phi_kernels():

op_name = os.path.split(op_file)[1]
all_matches = []
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())
op_pattern = 'PD_REGISTER_KERNEL\(.*?\).*?\{.*?\}'
op, op_count = find_kernel(content, op_pattern)
Expand All @@ -94,7 +93,7 @@ def prune_phi_kernels():
for p in all_matches:
content = content.replace(p, '')

with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))

print('We erase all grad op and kernel for Paddle-Inference lib.')
Expand Down Expand Up @@ -122,7 +121,7 @@ def append_fluid_kernels():
for op in op_white_list:
append_str = append_str + "file(APPEND ${pybind_file} \"USE_OP__(%s);\\n\")\n" % op

with io.open(file_name, 'r', encoding='utf-8') as f:
with open(file_name, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())

location_str = "nv_library(\n tensorrt_op_teller\n SRCS op_teller.cc\n DEPS framework_proto device_context)"
Expand All @@ -133,7 +132,7 @@ def append_fluid_kernels():
(location_str, file_name))
return False

with io.open(file_name, 'w', encoding='utf-8') as f:
with open(file_name, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(new_content))

#2. add op and kernel register
Expand All @@ -153,7 +152,7 @@ def append_fluid_kernels():
os.path.join(tool_dir, '../paddle/fluid/operators/'), '.cu', all_op)

for op_file in all_op:
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())

for op in op_white_list:
Expand All @@ -169,7 +168,7 @@ def append_fluid_kernels():
if len(matches) > 0:
content = content.replace(matches[0],
matches[0].replace(k, k + "__"))
with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))

return True
Expand Down
5 changes: 2 additions & 3 deletions tools/remove_grad_op_and_kernel.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,7 +20,6 @@
import sys
import re
import glob
import io


def find_type_files(cur_dir, file_type, file_list=[]):
Expand Down Expand Up @@ -126,7 +125,7 @@ def update_operator_cmake(cmake_file):
custom_pattern2 = custom_pattern2[:-1]

all_matches = []
with io.open(op_file, 'r', encoding='utf-8') as f:
with open(op_file, 'r', encoding='utf-8') as f:
content = ''.join(f.readlines())

op, op_count = remove_grad_op_and_kernel(content, op_pattern1,
Expand Down Expand Up @@ -159,7 +158,7 @@ def update_operator_cmake(cmake_file):
for i in all_matches:
content = content.replace(i, '')

with io.open(op_file, 'w', encoding='utf-8') as f:
with open(op_file, 'w', encoding='utf-8') as f:
f.write(u'{}'.format(content))

# 2. update operators/CMakeLists.txt
Expand Down