This repository has been archived by the owner on Feb 22, 2020. It is now read-only.

Commit 64f1d30

Merge branch 'master' into add-buildin-orchestration

Han Xiao authored Jul 16, 2019
2 parents 3423ec8 + 7bb0709
Showing 20 changed files with 873 additions and 79 deletions.
3 changes: 3 additions & 0 deletions LICENSE
@@ -98,6 +98,9 @@ Copyright (c) 2014-2019 Anthon van der Neut, Ruamel bvba
6. jieba 0.39
Copyright (c) 2013 Sun Junyi

7. opencv-python 4.0.0
Copyright (c) 2016-2018 Olli-Pekka Heinisuo and contributors




4 changes: 4 additions & 0 deletions gnes/cli/parser.py
@@ -183,6 +183,10 @@ def set_grpc_frontend_parser(parser=None):
_set_grpc_parser(parser)
parser.add_argument('--max_concurrency', type=int, default=10,
help='maximum concurrent client allowed')
    parser.add_argument('--max_send_size', type=int, default=100,
                        help='maximum send size for grpc server in megabytes (MB)')
    parser.add_argument('--max_receive_size', type=int, default=100,
                        help='maximum receive size for grpc server in megabytes (MB)')
return parser


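The two new flags are denominated in megabytes, while grpc channel options take raw byte counts. Below is a minimal sketch of how such values are typically wired into a grpc server; the build_server helper and the args object are illustrative stand-ins, not the actual GNES frontend code:

    import grpc
    from concurrent import futures

    def build_server(args):
        # grpc expects byte counts, so convert the MB-denominated CLI values
        options = [
            ('grpc.max_send_message_length', args.max_send_size * 1024 * 1024),
            ('grpc.max_receive_message_length', args.max_receive_size * 1024 * 1024),
        ]
        return grpc.server(
            futures.ThreadPoolExecutor(max_workers=args.max_concurrency),
            options=options)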
1 change: 1 addition & 0 deletions gnes/encoder/__init__.py
@@ -39,6 +39,7 @@
'PipelineEncoder': 'base',
'HashEncoder': 'numeric.hash',
'BasePytorchEncoder': 'image.base',
'TFInceptionEncoder': 'image.inception',
}

register_all_class(_cls2file_map)
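Adding TFInceptionEncoder to _cls2file_map tells GNES which module ('image.inception') defines the class, so register_all_class can register it by name. A one-line sketch of what the registration enables, assuming registered classes are exposed at the package level like the existing encoders:

    # hypothetical checkpoint prefix; the import works only after registration
    from gnes.encoder import TFInceptionEncoder
    encoder = TFInceptionEncoder(model_dir='/path/to/inception_v4.ckpt')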
70 changes: 70 additions & 0 deletions gnes/encoder/image/inception.py
@@ -0,0 +1,70 @@
# Tencent is pleased to support the open source community by making GNES available.
#
# Copyright (C) 2019 THL A29 Limited, a Tencent company. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List
import numpy as np
from gnes.helper import batch_iterator
from ..base import BaseImageEncoder
from PIL import Image


class TFInceptionEncoder(BaseImageEncoder):

def __init__(self, model_dir: str,
batch_size: int = 64,
select_layer: str = 'PreLogitsFlatten',
use_gpu: bool = True,
*args, **kwargs):
super().__init__(*args, **kwargs)

self.model_dir = model_dir
self.batch_size = batch_size
self.select_layer = select_layer
self.use_gpu = use_gpu
self.inception_size_x = 299
self.inception_size_y = 299

def post_init(self):
import tensorflow as tf
from gnes.encoder.image.inception_cores.inception_v4 import inception_v4
from gnes.encoder.image.inception_cores.inception_utils import inception_arg_scope

arg_scope = inception_arg_scope()
inception_v4.default_image_size = self.inception_size_x
self.inputs = tf.placeholder(tf.float32, (None,
self.inception_size_x,
self.inception_size_y, 3))

with tf.contrib.slim.arg_scope(arg_scope):
self.logits, self.end_points = inception_v4(self.inputs,
is_training=False,
dropout_keep_prob=1.0)

config = tf.ConfigProto(log_device_placement=False)
if self.use_gpu:
config.gpu_options.allow_growth = True
self.sess = tf.Session(config=config)
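        # restore the pre-trained Inception-V4 weights from the checkpoint at model_dir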
self.saver = tf.train.Saver()
self.saver.restore(self.sess, self.model_dir)

def encode(self, img: List['np.ndarray'], *args, **kwargs) -> np.ndarray:
ret = []
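        # resize each image to 299x299 and rescale pixels from [0, 255] to [-1, 1]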
img = [(np.array(Image.fromarray(im).resize((self.inception_size_x,
self.inception_size_y)), dtype=np.float32) * 2 / 255. - 1.) for im in img]
for _im in batch_iterator(img, self.batch_size):
_, end_points_ = self.sess.run((self.logits, self.end_points),
feed_dict={self.inputs: _im})
ret.append(end_points_[self.select_layer])
return np.concatenate(ret, axis=0).astype(np.float32)
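A minimal usage sketch for the new encoder; the checkpoint path is a placeholder, and post_init (normally invoked by the GNES framework after construction) is called directly here only for illustration:

    import numpy as np
    from gnes.encoder.image.inception import TFInceptionEncoder

    encoder = TFInceptionEncoder(model_dir='/path/to/inception_v4.ckpt', use_gpu=False)
    encoder.post_init()  # builds the graph and restores the checkpoint

    # one dummy RGB image; encode() handles resizing and rescaling internally
    imgs = [np.random.randint(0, 255, (480, 640, 3), dtype=np.uint8)]
    vecs = encoder.encode(imgs)  # expected shape (1, 1536) for 'PreLogitsFlatten'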
0 changes: 0 additions & 0 deletions gnes/encoder/image/inception_cores/__init__.py
Empty file.
82 changes: 82 additions & 0 deletions gnes/encoder/image/inception_cores/inception_utils.py
@@ -0,0 +1,82 @@
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains common code shared by all inception models.
Usage of arg scope:
with slim.arg_scope(inception_arg_scope()):
logits, end_points = inception.inception_v3(images, num_classes,
is_training=is_training)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

slim = tf.contrib.slim


def inception_arg_scope(weight_decay=0.00004,
use_batch_norm=True,
batch_norm_decay=0.9997,
batch_norm_epsilon=0.001,
activation_fn=tf.nn.relu,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
batch_norm_scale=False):
"""Defines the default arg scope for inception models.
Args:
weight_decay: The weight decay to use for regularizing the model.
use_batch_norm: "If `True`, batch_norm is applied after each convolution.
batch_norm_decay: Decay for batch norm moving average.
batch_norm_epsilon: Small float added to variance to avoid dividing by zero
in batch norm.
activation_fn: Activation function for conv2d.
batch_norm_updates_collections: Collection for the update ops for
batch norm.
batch_norm_scale: If True, uses an explicit `gamma` multiplier to scale the
activations in the batch normalization layer.
Returns:
An `arg_scope` to use for the inception models.
"""
batch_norm_params = {
# Decay for the moving averages.
'decay': batch_norm_decay,
# epsilon to prevent 0s in variance.
'epsilon': batch_norm_epsilon,
# collection containing update_ops.
'updates_collections': batch_norm_updates_collections,
# use fused batch norm if possible.
'fused': None,
'scale': batch_norm_scale,
}
if use_batch_norm:
normalizer_fn = slim.batch_norm
normalizer_params = batch_norm_params
else:
normalizer_fn = None
normalizer_params = {}
# Set weight_decay for weights in Conv and FC layers.
with slim.arg_scope([slim.conv2d, slim.fully_connected],
weights_regularizer=slim.l2_regularizer(weight_decay)):
with slim.arg_scope(
[slim.conv2d],
weights_initializer=slim.variance_scaling_initializer(),
activation_fn=activation_fn,
normalizer_fn=normalizer_fn,
normalizer_params=normalizer_params) as sc:
return sc
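In practice the returned scope means that any slim.conv2d created inside it implicitly inherits the L2 weight regularizer, the batch-norm normalizer and the variance-scaling initializer configured above. A short illustrative sketch; the layer width and scope name are arbitrary:

    import tensorflow as tf
    slim = tf.contrib.slim

    images = tf.placeholder(tf.float32, (None, 299, 299, 3))
    with slim.arg_scope(inception_arg_scope(weight_decay=4e-5)):
        # picks up l2_regularizer, batch_norm and ReLU from the enclosing scope
        net = slim.conv2d(images, 32, [3, 3], stride=2, scope='Conv2d_1a_3x3')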
