From a243b19a67ece49819139549a6b04f0200cd9419 Mon Sep 17 00:00:00 2001 From: Daniel Saez Date: Tue, 15 Nov 2016 16:53:17 +0000 Subject: [PATCH 01/11] Add option to save standalone model --- README.md | 2 + convert.py | 83 ++++++++++++++++++++++++++++++++++--- examples/mnist/README.md | 8 ++++ kaffe/tensorflow/network.py | 2 +- 4 files changed, 88 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index f1ce6be..5e3a6bf 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,8 @@ The output consists of two files: 1. A data file (in NumPy's native format) containing the model's learned parameters. 2. A Python class that constructs the model's graph. +Alternatively, you can save a standalone GraphDef model file containing the model's graph and learned parameters. + ### Examples See the [examples](examples/) folder for more details. diff --git a/convert.py b/convert.py index 30dcd4a..62b2164 100755 --- a/convert.py +++ b/convert.py @@ -7,6 +7,10 @@ from kaffe import KaffeError, print_stderr from kaffe.tensorflow import TensorFlowTransformer +import shutil +import tensorflow as tf +from tensorflow.python.tools.freeze_graph import freeze_graph + def fatal_error(msg): print_stderr(msg) @@ -16,25 +20,91 @@ def fatal_error(msg): def validate_arguments(args): if (args.data_output_path is not None) and (args.caffemodel is None): fatal_error('No input data path provided.') - if (args.caffemodel is not None) and (args.data_output_path is None): + if (args.caffemodel is not None) and (args.data_output_path is None) and (args.standalone_output_path is None): fatal_error('No output data path provided.') - if (args.code_output_path is None) and (args.data_output_path is None): + if (args.code_output_path is None) and (args.data_output_path is None) and (args.standalone_output_path is None): fatal_error('No output path specified.') -def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase): +def convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path, phase): try: + sess = tf.InteractiveSession() transformer = TensorFlowTransformer(def_path, caffemodel_path, phase=phase) print_stderr('Converting data...') - if caffemodel_path is not None: + if data_output_path is not None: data = transformer.transform_data() print_stderr('Saving data...') with open(data_output_path, 'wb') as data_out: np.save(data_out, data) - if code_output_path: + if code_output_path is not None: print_stderr('Saving source...') with open(code_output_path, 'wb') as src_out: src_out.write(transformer.transform_source()) + + if standalone_output_path: + filename, _ = os.path.splitext(os.path.basename(standalone_output_path)) + temp_folder = os.path.join(os.path.dirname(standalone_output_path), '.tmp') + os.makedirs(temp_folder) + + if data_output_path is None: + data = transformer.transform_data() + print_stderr('Saving data...') + data_output_path = os.path.join(temp_folder, filename) + '.npy' + print data_output_path + with open(data_output_path, 'wb') as data_out: + np.save(data_out, data) + + if code_output_path is None: + print_stderr('Saving source...') + code_output_path = os.path.join(temp_folder, filename) + '.py' + with open(code_output_path, 'wb') as src_out: + src_out.write(transformer.transform_source()) + + checkpoint_path = os.path.join(temp_folder, filename + '.ckpt') + graph_name = os.path.basename(standalone_output_path) + graph_folder = os.path.dirname(standalone_output_path) + input_node = transformer.graph.nodes[0].name + output_node = 
transformer.graph.nodes[-1].name + tensor_shape = transformer.graph.get_node(input_node).output_shape + tensor_shape_list = [tensor_shape.batch_size, tensor_shape.height, tensor_shape.width, tensor_shape.channels] + + # Dynamically import the generated network class from the emitted source file. + sys.path.append(os.path.dirname(code_output_path)) + module = os.path.splitext(os.path.basename(code_output_path))[0] + class_name = transformer.graph.name + KaffeNet = getattr(__import__(module), class_name) + + data_placeholder = tf.placeholder(tf.float32, tensor_shape_list, name=input_node) + net = KaffeNet({input_node: data_placeholder}) + + # Load the weights stored in NumPy format. + net.load(data_output_path, sess) + + print_stderr('Saving checkpoint...') + saver = tf.train.Saver() + saver.save(sess, checkpoint_path) + + print_stderr('Saving graph definition as protobuf...') + tf.train.write_graph(sess.graph.as_graph_def(), graph_folder, graph_name, False) + + # Freeze the graph: fold the checkpointed weights into the GraphDef as constants. + input_graph_path = standalone_output_path + input_saver_def_path = "" + input_binary = True + input_checkpoint_path = checkpoint_path + output_node_names = output_node + restore_op_name = 'save/restore_all' + filename_tensor_name = 'save/Const:0' + output_graph_path = standalone_output_path + clear_devices = True + + print_stderr('Saving standalone model...') + freeze_graph(input_graph_path, input_saver_def_path, + input_binary, input_checkpoint_path, + output_node_names, restore_op_name, + filename_tensor_name, output_graph_path, + clear_devices, '') + + shutil.rmtree(temp_folder) + print_stderr('Done.') except KaffeError as err: fatal_error('Error encountered: {}'.format(err)) @@ -46,6 +116,7 @@ def main(): parser.add_argument('--caffemodel', help='Model data (.caffemodel) path') parser.add_argument('--data-output-path', help='Converted data output path') parser.add_argument('--code-output-path', help='Save generated source to this path') + parser.add_argument('--standalone-output-path', help='Save generated standalone TensorFlow model to this path') parser.add_argument('-p', '--phase', default='test', @@ -53,7 +124,7 @@ args = parser.parse_args() validate_arguments(args) convert(args.def_path, args.caffemodel, args.data_output_path, args.code_output_path, - args.phase) + args.standalone_output_path, args.phase) if __name__ == '__main__': diff --git a/examples/mnist/README.md b/examples/mnist/README.md index bdda642..cc0ba7f 100644 --- a/examples/mnist/README.md +++ b/examples/mnist/README.md @@ -34,3 +34,11 @@ with tf.Session() as sesh: # Forward pass output = sesh.run(net.get_output(), ...) ``` + +#### Standalone model file: + +You can save a standalone GraphDef model file as follows: + + $ ./convert.py examples/mnist/lenet.prototxt --caffemodel examples/mnist/lenet_iter_10000.caffemodel --standalone-output-path=mynet.pb + +This generates a protobuf file named `mynet.pb` containing the model's graph and parameters. The [TensorFlow Image Recognition tutorial](https://www.tensorflow.org/versions/r0.11/tutorials/image_recognition/index.html) shows how to use models constructed in this way in [Python](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/models/image/imagenet) or [C++](https://github.com/tensorflow/tensorflow/tree/master/tensorflow/examples/label_image).
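+
+As a minimal sketch of loading the frozen model in Python (the `data` and `prob` tensor names below are placeholders -- substitute your network's actual input and output node names):
+
+```python
+import numpy as np
+import tensorflow as tf
+
+# Parse the frozen GraphDef and import it into the default graph.
+with open('mynet.pb', 'rb') as f:
+    graph_def = tf.GraphDef()
+    graph_def.ParseFromString(f.read())
+tf.import_graph_def(graph_def, name='')
+
+with tf.Session() as sess:
+    batch = np.zeros((1, 28, 28, 1), dtype=np.float32)  # placeholder input batch
+    # Once imported, tensors can be addressed by name; 'data:0' and 'prob:0'
+    # are example names -- use the ones from your converted network.
+    output = sess.run('prob:0', feed_dict={'data:0': batch})
+```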
\ No newline at end of file diff --git a/kaffe/tensorflow/network.py b/kaffe/tensorflow/network.py index 3dc34ca..6f3b153 100644 --- a/kaffe/tensorflow/network.py +++ b/kaffe/tensorflow/network.py @@ -212,7 +212,7 @@ def softmax(self, input, name): input = tf.squeeze(input, squeeze_dims=[1, 2]) else: raise ValueError('Rank 2 tensor input expected for softmax!') - return tf.nn.softmax(input, name) + return tf.nn.softmax(input, name=name) @layer def batch_normalization(self, input, name, scale_offset=True, relu=False): From 6520a8164ed383dab9a3edafe1799de9f864880d Mon Sep 17 00:00:00 2001 From: Daniel Saez Date: Thu, 5 Jan 2017 10:39:24 +0000 Subject: [PATCH 02/11] Add default graph name --- kaffe/tensorflow/transformer.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/kaffe/tensorflow/transformer.py b/kaffe/tensorflow/transformer.py index 34bfc9a..68e37af 100644 --- a/kaffe/tensorflow/transformer.py +++ b/kaffe/tensorflow/transformer.py @@ -281,5 +281,7 @@ def transform_source(self): mapper = TensorFlowMapper(self.graph) chains = mapper.map() emitter = TensorFlowEmitter() + if not self.graph.name: + self.graph.name = 'MyNet' self.source = emitter.emit(self.graph.name, chains) return self.source From 3f6854e8b08cf73ac430f1d9251c3758c0e1b570 Mon Sep 17 00:00:00 2001 From: Daniel Saez Date: Thu, 5 Jan 2017 10:39:47 +0000 Subject: [PATCH 03/11] Remove print statements --- convert.py | 1 - 1 file changed, 1 deletion(-) diff --git a/convert.py b/convert.py index 62b2164..1c005ce 100755 --- a/convert.py +++ b/convert.py @@ -50,7 +50,6 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand data = transformer.transform_data() print_stderr('Saving data...') data_output_path = os.path.join(temp_folder, filename) + '.npy' - print data_output_path with open(data_output_path, 'wb') as data_out: np.save(data_out, data) From fd54fb26517350e7ff8ab07cabbe1a62ea15eeb6 Mon Sep 17 00:00:00 2001 From: Keunhong Park Date: Mon, 18 Apr 2016 22:49:19 -0700 Subject: [PATCH 04/11] Migrate to Python3 --- convert.py | 2 - kaffe/caffe/caffepb.py | 245 ++++++++++--------------- kaffe/core.py | 406 +++++++++++++++++++++++++++++++++++++++++ kaffe/layers.py | 2 +- kaffe/shapes.py | 2 +- test.py | 86 +++++++++ 6 files changed, 587 insertions(+), 156 deletions(-) create mode 100644 kaffe/core.py create mode 100755 test.py diff --git a/convert.py b/convert.py index 30dcd4a..60d17ba 100755 --- a/convert.py +++ b/convert.py @@ -1,7 +1,5 @@ #!/usr/bin/env python -import os -import sys import numpy as np import argparse from kaffe import KaffeError, print_stderr diff --git a/kaffe/caffe/caffepb.py b/kaffe/caffe/caffepb.py index c7583c6..56233d1 100644 --- a/kaffe/caffe/caffepb.py +++ b/kaffe/caffe/caffepb.py @@ -1003,7 +1003,7 @@ _descriptor.FieldDescriptor( name='type', full_name='caffe.FillerParameter.type', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("constant", "utf-8"), + has_default_value=True, default_value=str("constant", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1081,7 +1081,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.NetParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1165,7 +1165,7 
@@ _descriptor.FieldDescriptor( name='net', full_name='caffe.SolverParameter.net', index=0, number=24, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1179,7 +1179,7 @@ _descriptor.FieldDescriptor( name='train_net', full_name='caffe.SolverParameter.train_net', index=2, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1284,7 +1284,7 @@ _descriptor.FieldDescriptor( name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17, number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1319,7 +1319,7 @@ _descriptor.FieldDescriptor( name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22, number=29, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("L2", "utf-8"), + has_default_value=True, default_value=str("L2", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1354,7 +1354,7 @@ _descriptor.FieldDescriptor( name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27, number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1396,7 +1396,7 @@ _descriptor.FieldDescriptor( name='type', full_name='caffe.SolverParameter.type', index=33, number=40, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("SGD", "utf-8"), + has_default_value=True, default_value=str("SGD", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1476,7 +1476,7 @@ _descriptor.FieldDescriptor( name='learned_net', full_name='caffe.SolverState.learned_net', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1616,7 +1616,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.ParamSpec.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1666,14 +1666,14 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), _descriptor.FieldDescriptor( name='type', full_name='caffe.LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2100,7 +2100,7 @@ _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2613,7 +2613,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.DataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2648,7 +2648,7 @@ _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.DataParameter.mean_file', index=5, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3000,7 +3000,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.HDF5DataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3042,7 +3042,7 @@ _descriptor.FieldDescriptor( name='file_name', full_name='caffe.HDF5OutputParameter.file_name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3099,7 +3099,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.ImageDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3155,7 +3155,7 @@ _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3176,7 +3176,7 @@ _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11, number=12, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("", "utf-8"), + has_default_value=True, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), @@ -3204,7 +3204,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.InfogainLossParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3670,21 +3670,21 @@ _descriptor.FieldDescriptor( name='module', full_name='caffe.PythonParameter.module', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='layer', full_name='caffe.PythonParameter.layer', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='param_str', full_name='caffe.PythonParameter.param_str', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("", "utf-8"), + has_default_value=True, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4095,7 +4095,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.WindowDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4109,7 +4109,7 @@ _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4165,7 +4165,7 @@ _descriptor.FieldDescriptor( name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10, number=11, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("warp", "utf-8"), + has_default_value=True, default_value=str("warp", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4179,7 +4179,7 @@ _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12, number=13, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("", "utf-8"), + has_default_value=True, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4265,7 +4265,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.V1LayerParameter.name', index=2, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ 
-4575,14 +4575,14 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.V0LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='caffe.V0LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4687,7 +4687,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.V0LayerParameter.source', index=16, number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4701,7 +4701,7 @@ _descriptor.FieldDescriptor( name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18, number=18, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=unicode("", "utf-8"), + has_default_value=False, default_value=str("", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4785,7 +4785,7 @@ _descriptor.FieldDescriptor( name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30, number=59, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=unicode("warp", "utf-8"), + has_default_value=True, default_value=str("warp", "utf-8"), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -5111,356 +5111,297 @@ DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER -class BlobShape(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class BlobShape(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _BLOBSHAPE # @@protoc_insertion_point(class_scope:caffe.BlobShape) -class BlobProto(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class BlobProto(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _BLOBPROTO # @@protoc_insertion_point(class_scope:caffe.BlobProto) -class BlobProtoVector(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class BlobProtoVector(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _BLOBPROTOVECTOR # @@protoc_insertion_point(class_scope:caffe.BlobProtoVector) -class Datum(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class Datum(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _DATUM # @@protoc_insertion_point(class_scope:caffe.Datum) -class FillerParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class FillerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _FILLERPARAMETER # @@protoc_insertion_point(class_scope:caffe.FillerParameter) -class NetParameter(_message.Message): - __metaclass__ = 
_reflection.GeneratedProtocolMessageType +class NetParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _NETPARAMETER # @@protoc_insertion_point(class_scope:caffe.NetParameter) -class SolverParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SolverParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SOLVERPARAMETER # @@protoc_insertion_point(class_scope:caffe.SolverParameter) -class SolverState(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SolverState(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SOLVERSTATE # @@protoc_insertion_point(class_scope:caffe.SolverState) -class NetState(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class NetState(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _NETSTATE # @@protoc_insertion_point(class_scope:caffe.NetState) -class NetStateRule(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class NetStateRule(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _NETSTATERULE # @@protoc_insertion_point(class_scope:caffe.NetStateRule) -class ParamSpec(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ParamSpec(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _PARAMSPEC # @@protoc_insertion_point(class_scope:caffe.ParamSpec) -class LayerParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _LAYERPARAMETER # @@protoc_insertion_point(class_scope:caffe.LayerParameter) -class TransformationParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class TransformationParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _TRANSFORMATIONPARAMETER # @@protoc_insertion_point(class_scope:caffe.TransformationParameter) -class LossParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class LossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _LOSSPARAMETER # @@protoc_insertion_point(class_scope:caffe.LossParameter) -class AccuracyParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class AccuracyParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _ACCURACYPARAMETER # @@protoc_insertion_point(class_scope:caffe.AccuracyParameter) -class ArgMaxParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ArgMaxParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _ARGMAXPARAMETER # @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter) -class ConcatParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ConcatParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _CONCATPARAMETER # @@protoc_insertion_point(class_scope:caffe.ConcatParameter) -class BatchNormParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class BatchNormParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = 
_BATCHNORMPARAMETER # @@protoc_insertion_point(class_scope:caffe.BatchNormParameter) -class BiasParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class BiasParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _BIASPARAMETER # @@protoc_insertion_point(class_scope:caffe.BiasParameter) -class ContrastiveLossParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ContrastiveLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _CONTRASTIVELOSSPARAMETER # @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter) -class ConvolutionParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ConvolutionParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _CONVOLUTIONPARAMETER # @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter) -class CropParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class CropParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _CROPPARAMETER # @@protoc_insertion_point(class_scope:caffe.CropParameter) -class DataParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class DataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _DATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.DataParameter) -class DropoutParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class DropoutParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _DROPOUTPARAMETER # @@protoc_insertion_point(class_scope:caffe.DropoutParameter) -class DummyDataParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class DummyDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _DUMMYDATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.DummyDataParameter) -class EltwiseParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class EltwiseParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _ELTWISEPARAMETER # @@protoc_insertion_point(class_scope:caffe.EltwiseParameter) -class ELUParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ELUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _ELUPARAMETER # @@protoc_insertion_point(class_scope:caffe.ELUParameter) -class EmbedParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class EmbedParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _EMBEDPARAMETER # @@protoc_insertion_point(class_scope:caffe.EmbedParameter) -class ExpParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ExpParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _EXPPARAMETER # @@protoc_insertion_point(class_scope:caffe.ExpParameter) -class FlattenParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class FlattenParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _FLATTENPARAMETER # 
@@protoc_insertion_point(class_scope:caffe.FlattenParameter) -class HDF5DataParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class HDF5DataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _HDF5DATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter) -class HDF5OutputParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class HDF5OutputParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _HDF5OUTPUTPARAMETER # @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter) -class HingeLossParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class HingeLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _HINGELOSSPARAMETER # @@protoc_insertion_point(class_scope:caffe.HingeLossParameter) -class ImageDataParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ImageDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _IMAGEDATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.ImageDataParameter) -class InfogainLossParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class InfogainLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _INFOGAINLOSSPARAMETER # @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter) -class InnerProductParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class InnerProductParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _INNERPRODUCTPARAMETER # @@protoc_insertion_point(class_scope:caffe.InnerProductParameter) -class InputParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class InputParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _INPUTPARAMETER # @@protoc_insertion_point(class_scope:caffe.InputParameter) -class LogParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class LogParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _LOGPARAMETER # @@protoc_insertion_point(class_scope:caffe.LogParameter) -class LRNParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class LRNParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _LRNPARAMETER # @@protoc_insertion_point(class_scope:caffe.LRNParameter) -class MemoryDataParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class MemoryDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _MEMORYDATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter) -class MVNParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class MVNParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _MVNPARAMETER # @@protoc_insertion_point(class_scope:caffe.MVNParameter) -class PoolingParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class PoolingParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _POOLINGPARAMETER # 
@@protoc_insertion_point(class_scope:caffe.PoolingParameter) -class PowerParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class PowerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _POWERPARAMETER # @@protoc_insertion_point(class_scope:caffe.PowerParameter) -class PythonParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class PythonParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _PYTHONPARAMETER # @@protoc_insertion_point(class_scope:caffe.PythonParameter) -class ReductionParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ReductionParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _REDUCTIONPARAMETER # @@protoc_insertion_point(class_scope:caffe.ReductionParameter) -class ReLUParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ReLUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _RELUPARAMETER # @@protoc_insertion_point(class_scope:caffe.ReLUParameter) -class ReshapeParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ReshapeParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _RESHAPEPARAMETER # @@protoc_insertion_point(class_scope:caffe.ReshapeParameter) -class ScaleParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ScaleParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SCALEPARAMETER # @@protoc_insertion_point(class_scope:caffe.ScaleParameter) -class SigmoidParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SigmoidParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SIGMOIDPARAMETER # @@protoc_insertion_point(class_scope:caffe.SigmoidParameter) -class SliceParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SliceParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SLICEPARAMETER # @@protoc_insertion_point(class_scope:caffe.SliceParameter) -class SoftmaxParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SoftmaxParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SOFTMAXPARAMETER # @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter) -class TanHParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class TanHParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _TANHPARAMETER # @@protoc_insertion_point(class_scope:caffe.TanHParameter) -class TileParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class TileParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _TILEPARAMETER # @@protoc_insertion_point(class_scope:caffe.TileParameter) -class ThresholdParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class ThresholdParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _THRESHOLDPARAMETER # @@protoc_insertion_point(class_scope:caffe.ThresholdParameter) -class WindowDataParameter(_message.Message): - 
__metaclass__ = _reflection.GeneratedProtocolMessageType +class WindowDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _WINDOWDATAPARAMETER # @@protoc_insertion_point(class_scope:caffe.WindowDataParameter) -class SPPParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class SPPParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _SPPPARAMETER # @@protoc_insertion_point(class_scope:caffe.SPPParameter) -class V1LayerParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class V1LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _V1LAYERPARAMETER # @@protoc_insertion_point(class_scope:caffe.V1LayerParameter) -class V0LayerParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class V0LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _V0LAYERPARAMETER # @@protoc_insertion_point(class_scope:caffe.V0LayerParameter) -class PReLUParameter(_message.Message): - __metaclass__ = _reflection.GeneratedProtocolMessageType +class PReLUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): DESCRIPTOR = _PRELUPARAMETER # @@protoc_insertion_point(class_scope:caffe.PReLUParameter) diff --git a/kaffe/core.py b/kaffe/core.py new file mode 100644 index 0000000..3a39d3b --- /dev/null +++ b/kaffe/core.py @@ -0,0 +1,406 @@ +import os +import sys +import numpy as np +from google.protobuf import text_format + +from .layers import * +from .errors import KaffeError, print_stderr + +try: + import caffe + PYCAFFE_AVAILABLE = True +except ImportError: + import kaffe.caffe_pb2 as caffepb + PYCAFFE_AVAILABLE = False + print_stderr('WARNING: PyCaffe not found!') + print_stderr('Falling back to protocol buffer implementation.') + print_stderr('* Conversions will be drastically slower.') + print_stderr('* This backend is UNTESTED!') + +if PYCAFFE_AVAILABLE: + # Use the protobuf code from the imported distribution. + # This way, Caffe variants with custom layers will work. + try: + sys.path.append(os.path.join(os.path.dirname(caffe.__file__), 'proto/')) + from . import caffe_pb2 as caffepb + except ImportError: + # import kaffe.caffepb + print_stderr('Failed to import dist protobuf code. Using failsafe.') + print_stderr('Custom layers might not work.') + +class Node(object): + def __init__(self, name, kind, layer=None): + self.name = name + self.kind = kind + self.layer = LayerAdapter(layer, kind) if layer else None + self.parents = [] + self.children = [] + self.data = None + self.output_shape = None + self.metadata = {} + + def add_parent(self, parent_node): + assert parent_node not in self.parents + self.parents.append(parent_node) + if self not in parent_node.children: + parent_node.children.append(self) + + def add_child(self, child_node): + assert child_node not in self.children + self.children.append(child_node) + if self not in child_node.parents: + child_node.parents.append(self) + + def get_only_parent(self): + if len(self.parents)!=1: + raise KaffeError('Node (%s) expected to have 1 parent.
Found %s.'%(self, len(self.parents))) + return self.parents[0] + + @property + def parameters(self): + if self.layer is not None: + return self.layer.parameters + return None + + @property + def data_shape(self): + assert self.data + return self.data[IDX_WEIGHTS].shape + + def __str__(self): + return '[%s] %s'%(self.kind, self.name) + + def __repr__(self): + return '%s (0x%x)'%(self.name, id(self)) + +class Graph(object): + def __init__(self, nodes=None, name=None): + self.nodes = nodes or [] + self.node_lut = {node.name:node for node in self.nodes} + self.name = name + + def add_node(self, node): + self.nodes.append(node) + self.node_lut[node.name] = node + + def get_node(self, name): + try: + return self.node_lut[name] + except KeyError: + raise KaffeError('Layer not found: %s'%name) + + def get_input_nodes(self): + return [node for node in self.nodes if len(node.parents)==0] + + def get_output_nodes(self): + return [node for node in self.nodes if len(node.children)==0] + + def topologically_sorted(self): + sorted_nodes = [] + unsorted_nodes = list(self.nodes) + temp_marked = set() + perm_marked = set() + def visit(node): + if node in temp_marked: + raise KaffeError('Graph is not a DAG.') + if node in perm_marked: + return + temp_marked.add(node) + for child in node.children: + visit(child) + perm_marked.add(node) + temp_marked.remove(node) + sorted_nodes.insert(0, node) + while len(unsorted_nodes): + visit(unsorted_nodes.pop()) + return sorted_nodes + + def compute_output_shapes(self): + sorted_nodes = self.topologically_sorted() + for node in sorted_nodes: + node.output_shape = NodeKind.compute_output_shape(node) + + def __contains__(self, key): + return key in self.node_lut + + def __str__(self): + hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', 'Output') + s = [hdr, '-'*94] + for node in self.topologically_sorted(): + data_shape = node.data[IDX_WEIGHTS].shape if node.data else '--' + out_shape = node.output_shape or '--' + s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, + node.name, str(data_shape), str(out_shape))) + return '\n'.join(s) + +class DataInjector(object): + def __init__(self, def_path, data_path): + self.def_path = def_path + self.data_path = data_path + self.did_use_pb = False + self.load() + + def load(self): + if PYCAFFE_AVAILABLE: + self.load_using_caffe() + else: + self.load_using_pb() + + def load_using_caffe(self): + net = caffe.Net(self.def_path, self.data_path, caffe.TEST) + data = lambda blob: blob.data + self.params = [(k, list(map(data, v))) for k,v in list(net.params.items())] + + def load_using_pb(self): + data = caffepb.NetParameter() + data.MergeFromString(open(self.data_path, 'rb').read()) + pair = lambda layer: (layer.name, self.transform_data(layer)) + layers = data.layers or data.layer + self.params = [pair(layer) for layer in layers if layer.blobs] + self.did_use_pb = True + + def transform_data(self, layer): + transformed = [] + for idx, blob in enumerate(layer.blobs): + if len(blob.shape.dim): + dims = blob.shape.dim + c_o, c_i, h, w = list(map(int, [1]*(4-len(dims))+list(dims))) + else: + c_o = blob.num + c_i = blob.channels + h = blob.height + w = blob.width + data = np.array(blob.data, dtype=np.float32).reshape(c_o, c_i, h, w) + transformed.append(data) + return transformed + + def adjust_parameters(self, node, data): + if not self.did_use_pb: + return data + # When using the protobuf-backend, each parameter initially has four dimensions. 
+ # In certain cases (like FC layers), we want to eliminate the singleton dimensions. + # This implementation takes care of the common cases. However, it does leave the + # potential for future issues. + # The Caffe-backend does not suffer from this problem. + data = list(data) + squeeze_indices = [1] # Squeeze biases. + if node.kind==NodeKind.InnerProduct: + squeeze_indices.append(0) # Squeeze FC. + for idx in squeeze_indices: + data[idx] = np.squeeze(data[idx]) + return data + + def inject(self, graph): + for layer_name, data in self.params: + if layer_name in graph: + node = graph.get_node(layer_name) + node.data = self.adjust_parameters(node, data) + else: + print_stderr('Ignoring parameters for non-existent layer: %s'%layer_name) + +class DataReshaper(object): + def __init__(self, mapping): + self.mapping = mapping + + def map(self, ndim): + try: + return self.mapping[ndim] + except KeyError: + raise KaffeError('Ordering not found for %d dimensional tensor.'%ndim) + + def transpose(self, data): + return data.transpose(self.map(data.ndim)) + + def has_spatial_parent(self, node): + try: + parent = node.get_only_parent() + s = parent.output_shape + return (s[IDX_H]>1 or s[IDX_W]>1) + except KaffeError: + return False + + def reshape(self, graph, replace=True): + for node in graph.nodes: + if node.data is None: + continue + data = node.data[IDX_WEIGHTS] + if (node.kind==NodeKind.InnerProduct) and self.has_spatial_parent(node): + # The FC layer connected to the spatial layer needs to be + # re-wired to match the new spatial ordering. + in_shape = node.get_only_parent().output_shape + fc_shape = data.shape + fc_order = self.map(2) + data = data.reshape((fc_shape[IDX_C_OUT], in_shape[IDX_C], in_shape[IDX_H], in_shape[IDX_W])) + data = self.transpose(data) + node.reshaped_data = data.reshape(fc_shape[fc_order[0]], fc_shape[fc_order[1]]) + else: + node.reshaped_data = self.transpose(data) + + if replace: + for node in graph.nodes: + if node.data is not None: + node.data[IDX_WEIGHTS] = node.reshaped_data + del node.reshaped_data + +class GraphBuilder(object): + def __init__(self, def_path, data_path=None, phase='test'): + self.def_path = def_path + self.data_path = data_path + self.phase = phase + self.load() + + def load(self): + self.params = caffepb.NetParameter() + with open(self.def_path, 'r') as def_file: + text_format.Merge(def_file.read(), self.params) + + def filter_layers(self, layers): + phase_map = {0:'train', 1:'test'} + filtered_layer_names = set() + filtered_layers = [] + for layer in layers: + phase = self.phase + if len(layer.include): + phase = phase_map[layer.include[0].phase] + if len(layer.exclude): + phase = phase_map[1-layer.include[0].phase] + exclude = (phase!=self.phase) + # Dropout layers appear in a fair number of Caffe + # test-time networks. These are just ignored. We'll + # filter them out here. + if (not exclude) and (phase=='test'): + exclude = (layer.type==LayerType.Dropout) + if not exclude: + filtered_layers.append(layer) + # Guard against dupes. + assert layer.name not in filtered_layer_names + filtered_layer_names.add(layer.name) + return filtered_layers + + def make_node(self, layer): + kind = NodeKind.map_raw_kind(layer.type) + if kind is None: + raise KaffeError('Unknown layer type encountered: %s'%layer.type) + return Node(layer.name, kind, layer=layer) + + def make_input_nodes(self): + # This method is for old-style inputs, where the input specification + # was not treated as a first-class layer in the prototext. 
+ # Newer models use the "Input layer" type. + nodes = [Node(name, NodeKind.Data) for name in self.params.input] + if len(nodes): + input_dim = list(map(int, self.params.input_dim)) + if not input_dim: + if len(self.params.input_shape)>0: + input_dim = list(map(int, self.params.input_shape[0].dim)) + else: + raise KaffeError('Dimensions for input not specified.') + for node in nodes: + node.output_shape = tuple(input_dim) + return nodes + + def fuse_relus(self, nodes): + fused_nodes = [] + for node in nodes: + if node.kind!=NodeKind.ReLU: + continue + parent = node.get_only_parent() + if len(parent.children)!=1: + # We can only fuse this ReLU if its parent's + # value isn't used by any other node. + continue + # Rewrite the ReLU's children to its parent. + for child in node.children: + child.parents.remove(node) + parent.add_child(child) + # Disconnect the ReLU from the graph. + parent.children.remove(node) + fused_nodes.append(node) + # Annotate the fused node. + parent.metadata['relu'] = True + return [node for node in nodes if node not in fused_nodes] + + def build(self, fuse_relus=True): + layers = self.params.layers or self.params.layer + layers = self.filter_layers(layers) + nodes = self.make_input_nodes() + nodes += [self.make_node(layer) for layer in layers] + graph = Graph(nodes=nodes, name=self.params.name) + node_outputs = {} + for layer in layers: + node = graph.get_node(layer.name) + for parent_name in layer.bottom: + assert parent_name!=layer.name + parent_node = node_outputs.get(parent_name) + if (parent_node is None) or (parent_node==node): + parent_node = graph.get_node(parent_name) + node.add_parent(parent_node) + for child_name in layer.top: + if child_name==layer.name: + continue + if child_name in graph: + # This is an "in-place operation" that overwrites an existing node. + # This would create a cycle in the graph. We'll undo the in-placing + # by substituting this node wherever the overwritten node is referenced. + node_outputs[child_name] = node + else: + # This is an "implicit" child node: not explicitly + # defined in the prototxt, but as a top (output) for some layer. + graph.add_node(Node(child_name, NodeKind.Implicit)) + node.add_child(graph.get_node(child_name)) + if fuse_relus: + graph = Graph(nodes=self.fuse_relus(graph.nodes), name=graph.name) + graph.compute_output_shapes() + if self.data_path is not None: + DataInjector(self.def_path, self.data_path).inject(graph) + return graph + +class NodeMapper(NodeDispatch): + def __init__(self, graph): + self.graph = graph + + def attach_node(self, node): + return True + + def map(self): + nodes = self.graph.topologically_sorted() + # Remove input nodes - we'll handle them separately. + input_nodes = self.graph.get_input_nodes() + nodes = [t for t in nodes if t not in input_nodes] + # Remove implicit nodes. + nodes = [t for t in nodes if t.kind!=NodeKind.Implicit] + # Decompose DAG into chains. + chains = [] + for node in nodes: + attach_to_chain = None + if len(node.parents)==1: + parent = node.get_only_parent() + for chain in chains: + if chain[-1]==parent: + # Node is part of an existing chain. + attach_to_chain = chain + break + if attach_to_chain is None: + # Start a new chain for this node. + attach_to_chain = [] + chains.append(attach_to_chain) + attach_to_chain.append(node) + # Map each chain. 
+ mapped_chains = [] + for chain in chains: + mapped_chains.append(self.map_chain(chain)) + return self.commit(mapped_chains) + + def map_chain(self, chain): + return [self.map_node(node) for node in chain] + + def map_node(self, node): + map_func = self.get_handler(node.kind, 'map') + mapped_node = map_func(node) + assert mapped_node is not None + if self.attach_node(node): + mapped_node.node = node + return mapped_node + + def commit(self, mapped_chains): + raise NotImplementedError('Must be implemented by subclass.') diff --git a/kaffe/layers.py b/kaffe/layers.py index c3c5955..1154237 100644 --- a/kaffe/layers.py +++ b/kaffe/layers.py @@ -51,7 +51,7 @@ 'Threshold': shape_identity, } -LAYER_TYPES = LAYER_DESCRIPTORS.keys() +LAYER_TYPES = list(LAYER_DESCRIPTORS.keys()) LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES}) diff --git a/kaffe/shapes.py b/kaffe/shapes.py index a70ff14..0dfd230 100644 --- a/kaffe/shapes.py +++ b/kaffe/shapes.py @@ -42,7 +42,7 @@ def shape_data(node): return node.output_shape try: # New-style input specification - return map(int, node.parameters.shape[0].dim) + return list(map(int, node.parameters.shape[0].dim)) except: # We most likely have a data layer on our hands. The problem is, # Caffe infers the dimensions of the data from the source (eg: LMDB). diff --git a/test.py b/test.py new file mode 100755 index 0000000..a7c2a22 --- /dev/null +++ b/test.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python + +import os +import sys +import cv2 +import numpy as np +import tensorflow as tf +import examples + +class ImageNet(object): + def __init__(self, val_path, data_path, model): + gt_lines = open(val_path).readlines() + gt_pairs = [line.split() for line in gt_lines] + self.image_paths = [os.path.join(data_path, p[0]) for p in gt_pairs] + self.labels = np.array([int(p[1]) for p in gt_pairs]) + self.model = model + self.mean = np.array([104., 117., 124.]) + + def read_image(self, path): + img = cv2.imread(path) + h, w, c = np.shape(img) + scale_size = self.model.scale_size + crop_size = self.model.crop_size + assert c==3 + if self.model.isotropic: + aspect = float(w)/h + if w6}/{:<6} {:>6.2f}%'.format(count, total, cur_accuracy))) + print(('Top %s Accuracy: %s'%(top_k, float(correct)/total))) + +def main(): + args = sys.argv[1:] + if len(args) not in (3, 4): + print(('usage: %s net.params imagenet-val.txt imagenet-data-dir [model-index=0]'%os.path.basename(__file__))) + exit(-1) + model_index = 0 if len(args)==3 else int(args[3]) + if model_index>=len(examples.MODELS): + print('Invalid model index. Options are:') + for idx, model in enumerate(examples.MODELS): + print(('%s: %s'%(idx, model))) + exit(-1) + model = examples.MODELS[model_index] + print(('Using model: %s'%(model))) + test_imagenet(model, *args[:3]) + +if __name__ == '__main__': + main() From 4068ea31a1ff7dc91cd429e091e61b039448ce1b Mon Sep 17 00:00:00 2001 From: kpar Date: Thu, 19 Jan 2017 13:40:12 -0800 Subject: [PATCH 05/11] Migrate to Python 3, make V1 compatible. 
--- convert.py | 2 +- kaffe/caffe/caffe.proto | 1404 +++++++++++++++++++++++++++++++ kaffe/caffe/caffepb.py | 1401 +++++++++++++++++++----------- kaffe/caffe/resolver.py | 17 +- kaffe/core.py | 2 +- kaffe/graph.py | 17 +- kaffe/layers.py | 48 +- kaffe/tensorflow/transformer.py | 4 +- 8 files changed, 2399 insertions(+), 496 deletions(-) create mode 100644 kaffe/caffe/caffe.proto diff --git a/convert.py b/convert.py index 60d17ba..c37713d 100755 --- a/convert.py +++ b/convert.py @@ -31,7 +31,7 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, phase np.save(data_out, data) if code_output_path: print_stderr('Saving source...') - with open(code_output_path, 'wb') as src_out: + with open(code_output_path, 'w') as src_out: src_out.write(transformer.transform_source()) print_stderr('Done.') except KaffeError as err: diff --git a/kaffe/caffe/caffe.proto b/kaffe/caffe/caffe.proto new file mode 100644 index 0000000..1c85f69 --- /dev/null +++ b/kaffe/caffe/caffe.proto @@ -0,0 +1,1404 @@ +syntax = "proto2"; + +package caffe; + +// Specifies the shape (dimensions) of a Blob. +message BlobShape { + repeated int64 dim = 1 [packed = true]; +} + +message BlobProto { + optional BlobShape shape = 7; + repeated float data = 5 [packed = true]; + repeated float diff = 6 [packed = true]; + repeated double double_data = 8 [packed = true]; + repeated double double_diff = 9 [packed = true]; + + // 4D dimensions -- deprecated. Use "shape" instead. + optional int32 num = 1 [default = 0]; + optional int32 channels = 2 [default = 0]; + optional int32 height = 3 [default = 0]; + optional int32 width = 4 [default = 0]; +} + +// The BlobProtoVector is simply a way to pass multiple blobproto instances +// around. +message BlobProtoVector { + repeated BlobProto blobs = 1; +} + +message Datum { + optional int32 channels = 1; + optional int32 height = 2; + optional int32 width = 3; + // the actual image data, in bytes + optional bytes data = 4; + optional int32 label = 5; + // Optionally, the datum could also hold float data. + repeated float float_data = 6; + // If true data contains an encoded image that need to be decoded + optional bool encoded = 7 [default = false]; +} + +message FillerParameter { + // The filler type. + optional string type = 1 [default = 'constant']; + optional float value = 2 [default = 0]; // the value in constant filler + optional float min = 3 [default = 0]; // the min value in uniform filler + optional float max = 4 [default = 1]; // the max value in uniform filler + optional float mean = 5 [default = 0]; // the mean value in Gaussian filler + optional float std = 6 [default = 1]; // the std value in Gaussian filler + // The expected number of non-zero output weights for a given input in + // Gaussian filler -- the default -1 means don't perform sparsification. + optional int32 sparse = 7 [default = -1]; + // Normalize the filler variance by fan_in, fan_out, or their average. + // Applies to 'xavier' and 'msra' fillers. + enum VarianceNorm { + FAN_IN = 0; + FAN_OUT = 1; + AVERAGE = 2; + } + optional VarianceNorm variance_norm = 8 [default = FAN_IN]; +} + +message NetParameter { + optional string name = 1; // consider giving the network a name + // DEPRECATED. See InputParameter. The input blobs to the network. + repeated string input = 3; + // DEPRECATED. See InputParameter. The shape of the input blobs. + repeated BlobShape input_shape = 8; + + // 4D input dimensions -- deprecated. Use "input_shape" instead. 
+ // If specified, for each input blob there should be four + // values specifying the num, channels, height and width of the input blob. + // Thus, there should be a total of (4 * #input) numbers. + repeated int32 input_dim = 4; + + // Whether the network will force every layer to carry out backward operation. + // If set False, then whether to carry out backward is determined + // automatically according to the net structure and learning rates. + optional bool force_backward = 5 [default = false]; + // The current "state" of the network, including the phase, level, and stage. + // Some layers may be included/excluded depending on this state and the states + // specified in the layers' include and exclude fields. + optional NetState state = 6; + + // Print debugging information about results while running Net::Forward, + // Net::Backward, and Net::Update. + optional bool debug_info = 7 [default = false]; + + // The layers that make up the net. Each of their configurations, including + // connectivity and behavior, is specified as a LayerParameter. + repeated LayerParameter layer = 100; // ID 100 so layers are printed last. + + // DEPRECATED: use 'layer' instead. + repeated V1LayerParameter layers = 2; +} + +// NOTE +// Update the next available ID when you add a new SolverParameter field. +// +// SolverParameter next available ID: 42 (last added: layer_wise_reduce) +message SolverParameter { + ////////////////////////////////////////////////////////////////////////////// + // Specifying the train and test networks + // + // Exactly one train net must be specified using one of the following fields: + // train_net_param, train_net, net_param, net + // One or more test nets may be specified using any of the following fields: + // test_net_param, test_net, net_param, net + // If more than one test net field is specified (e.g., both net and + // test_net are specified), they will be evaluated in the field order given + // above: (1) test_net_param, (2) test_net, (3) net_param/net. + // A test_iter must be specified for each test_net. + // A test_level and/or a test_stage may also be specified for each test_net. + ////////////////////////////////////////////////////////////////////////////// + + // Proto filename for the train net, possibly combined with one or more + // test nets. + optional string net = 24; + // Inline train net param, possibly combined with one or more test nets. + optional NetParameter net_param = 25; + + optional string train_net = 1; // Proto filename for the train net. + repeated string test_net = 2; // Proto filenames for the test nets. + optional NetParameter train_net_param = 21; // Inline train net params. + repeated NetParameter test_net_param = 22; // Inline test net params. + + // The states for the train/test nets. Must be unspecified or + // specified once per net. + // + // By default, all states will have solver = true; + // train_state will have phase = TRAIN, + // and all test_state's will have phase = TEST. + // Other defaults are set according to the NetState defaults. + optional NetState train_state = 26; + repeated NetState test_state = 27; + + // The number of iterations for each test net. + repeated int32 test_iter = 3; + + // The number of iterations between two testing phases. + optional int32 test_interval = 4 [default = 0]; + optional bool test_compute_loss = 19 [default = false]; + // If true, run an initial test pass before the first iteration, + // ensuring memory availability and printing the starting value of the loss. 
+  optional bool test_initialization = 32 [default = true];
+  optional float base_lr = 5; // The base learning rate
+  // the number of iterations between displaying info. If display = 0, no info
+  // will be displayed.
+  optional int32 display = 6;
+  // Display the loss averaged over the last average_loss iterations
+  optional int32 average_loss = 33 [default = 1];
+  optional int32 max_iter = 7; // the maximum number of iterations
+  // accumulate gradients over `iter_size` x `batch_size` instances
+  optional int32 iter_size = 36 [default = 1];
+
+  // The learning rate decay policy. The currently implemented learning rate
+  // policies are as follows:
+  //    - fixed: always return base_lr.
+  //    - step: return base_lr * gamma ^ (floor(iter / step))
+  //    - exp: return base_lr * gamma ^ iter
+  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
+  //    - multistep: similar to step but it allows non-uniform steps defined by
+  //      stepvalue
+  //    - poly: the effective learning rate follows a polynomial decay, to be
+  //      zero by the max_iter. return base_lr * (1 - iter/max_iter) ^ power
+  //    - sigmoid: the effective learning rate follows a sigmoid decay
+  //      return base_lr * (1 / (1 + exp(-gamma * (iter - stepsize))))
+  //
+  // where base_lr, max_iter, gamma, step, stepvalue and power are defined
+  // in the solver parameter protocol buffer, and iter is the current iteration.
+  optional string lr_policy = 8;
+  optional float gamma = 9; // The parameter to compute the learning rate.
+  optional float power = 10; // The parameter to compute the learning rate.
+  optional float momentum = 11; // The momentum value.
+  optional float weight_decay = 12; // The weight decay.
+  // regularization types supported: L1 and L2
+  // controlled by weight_decay
+  optional string regularization_type = 29 [default = "L2"];
+  // the stepsize for learning rate policy "step"
+  optional int32 stepsize = 13;
+  // the stepsize for learning rate policy "multistep"
+  repeated int32 stepvalue = 34;
+
+  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
+  // whenever their actual L2 norm is larger.
+  optional float clip_gradients = 35 [default = -1];
+
+  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
+  optional string snapshot_prefix = 15; // The prefix for the snapshot.
+  // whether to snapshot diff in the results or not. Snapshotting diff will help
+  // debugging but the final protocol buffer size will be much larger.
+  optional bool snapshot_diff = 16 [default = false];
+  enum SnapshotFormat {
+    HDF5 = 0;
+    BINARYPROTO = 1;
+  }
+  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
+  // the mode solver will use: 0 for CPU and 1 for GPU. Use GPU by default.
+  enum SolverMode {
+    CPU = 0;
+    GPU = 1;
+  }
+  optional SolverMode solver_mode = 17 [default = GPU];
+  // the device_id that will be used in GPU mode. Use device_id = 0 by default.
+  optional int32 device_id = 18 [default = 0];
+  // If non-negative, the seed with which the Solver will initialize the Caffe
+  // random number generator -- useful for reproducible results. Otherwise,
+  // (and by default) initialize using a seed derived from the system clock.
+ optional int64 random_seed = 20 [default = -1]; + + // type of the solver + optional string type = 40 [default = "SGD"]; + + // numerical stability for RMSProp, AdaGrad and AdaDelta and Adam + optional float delta = 31 [default = 1e-8]; + // parameters for the Adam solver + optional float momentum2 = 39 [default = 0.999]; + + // RMSProp decay value + // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t) + optional float rms_decay = 38 [default = 0.99]; + + // If true, print information about the state of the net that may help with + // debugging learning problems. + optional bool debug_info = 23 [default = false]; + + // If false, don't save a snapshot after training finishes. + optional bool snapshot_after_train = 28 [default = true]; + + // DEPRECATED: old solver enum types, use string instead + enum SolverType { + SGD = 0; + NESTEROV = 1; + ADAGRAD = 2; + RMSPROP = 3; + ADADELTA = 4; + ADAM = 5; + } + // DEPRECATED: use type instead of solver_type + optional SolverType solver_type = 30 [default = SGD]; + + // Overlap compute and communication for data parallel training + optional bool layer_wise_reduce = 41 [default = true]; +} + +// A message that stores the solver snapshots +message SolverState { + optional int32 iter = 1; // The current iteration + optional string learned_net = 2; // The file that stores the learned net. + repeated BlobProto history = 3; // The history for sgd solvers + optional int32 current_step = 4 [default = 0]; // The current step for learning rate +} + +enum Phase { + TRAIN = 0; + TEST = 1; +} + +message NetState { + optional Phase phase = 1 [default = TEST]; + optional int32 level = 2 [default = 0]; + repeated string stage = 3; +} + +message NetStateRule { + // Set phase to require the NetState have a particular phase (TRAIN or TEST) + // to meet this rule. + optional Phase phase = 1; + + // Set the minimum and/or maximum levels in which the layer should be used. + // Leave undefined to meet the rule regardless of level. + optional int32 min_level = 2; + optional int32 max_level = 3; + + // Customizable sets of stages to include or exclude. + // The net must have ALL of the specified stages and NONE of the specified + // "not_stage"s to meet the rule. + // (Use multiple NetStateRules to specify conjunctions of stages.) + repeated string stage = 4; + repeated string not_stage = 5; +} + +// Specifies training parameters (multipliers on global learning constants, +// and the name and other settings used for weight sharing). +message ParamSpec { + // The names of the parameter blobs -- useful for sharing parameters among + // layers, but never required otherwise. To share a parameter between two + // layers, give it a (non-empty) name. + optional string name = 1; + + // Whether to require shared weights to have the same shape, or just the same + // count -- defaults to STRICT if unspecified. + optional DimCheckMode share_mode = 2; + enum DimCheckMode { + // STRICT (default) requires that num, channels, height, width each match. + STRICT = 0; + // PERMISSIVE requires only the count (num*channels*height*width) to match. + PERMISSIVE = 1; + } + + // The multiplier on the global learning rate for this parameter. + optional float lr_mult = 3 [default = 1.0]; + + // The multiplier on the global weight decay for this parameter. + optional float decay_mult = 4 [default = 1.0]; +} + +// NOTE +// Update the next available ID when you add a new LayerParameter field. 
+// +// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param) +message LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the layer type + repeated string bottom = 3; // the name of each bottom blob + repeated string top = 4; // the name of each top blob + + // The train / test phase for computation. + optional Phase phase = 10; + + // The amount of weight to assign each top blob in the objective. + // Each layer assigns a default value, usually of either 0 or 1, + // to each top blob. + repeated float loss_weight = 5; + + // Specifies training parameters (multipliers on global learning constants, + // and the name and other settings used for weight sharing). + repeated ParamSpec param = 6; + + // The blobs containing the numeric parameters of the layer. + repeated BlobProto blobs = 7; + + // Specifies whether to backpropagate to each bottom. If unspecified, + // Caffe will automatically infer whether each input needs backpropagation + // to compute parameter gradients. If set to true for some inputs, + // backpropagation to those inputs is forced; if set false for some inputs, + // backpropagation to those inputs is skipped. + // + // The size must be either 0 or equal to the number of bottoms. + repeated bool propagate_down = 11; + + // Rules controlling whether and when a layer is included in the network, + // based on the current NetState. You may specify a non-zero number of rules + // to include OR exclude, but not both. If no include or exclude rules are + // specified, the layer is always included. If the current NetState meets + // ANY (i.e., one or more) of the specified rules, the layer is + // included/excluded. + repeated NetStateRule include = 8; + repeated NetStateRule exclude = 9; + + // Parameters for data pre-processing. + optional TransformationParameter transform_param = 100; + + // Parameters shared by loss layers. + optional LossParameter loss_param = 101; + + // Layer type-specific parameters. + // + // Note: certain layers may have more than one computational engine + // for their implementation. These layers include an Engine type and + // engine parameter for selecting the implementation. + // The default for the engine is set by the ENGINE switch at compile-time. 
+ optional AccuracyParameter accuracy_param = 102; + optional ArgMaxParameter argmax_param = 103; + optional BatchNormParameter batch_norm_param = 139; + optional BiasParameter bias_param = 141; + optional ConcatParameter concat_param = 104; + optional ContrastiveLossParameter contrastive_loss_param = 105; + optional ConvolutionParameter convolution_param = 106; + optional CropParameter crop_param = 144; + optional DataParameter data_param = 107; + optional DropoutParameter dropout_param = 108; + optional DummyDataParameter dummy_data_param = 109; + optional EltwiseParameter eltwise_param = 110; + optional ELUParameter elu_param = 140; + optional EmbedParameter embed_param = 137; + optional ExpParameter exp_param = 111; + optional FlattenParameter flatten_param = 135; + optional HDF5DataParameter hdf5_data_param = 112; + optional HDF5OutputParameter hdf5_output_param = 113; + optional HingeLossParameter hinge_loss_param = 114; + optional ImageDataParameter image_data_param = 115; + optional InfogainLossParameter infogain_loss_param = 116; + optional InnerProductParameter inner_product_param = 117; + optional InputParameter input_param = 143; + optional LogParameter log_param = 134; + optional LRNParameter lrn_param = 118; + optional MemoryDataParameter memory_data_param = 119; + optional MVNParameter mvn_param = 120; + optional ParameterParameter parameter_param = 145; + optional PoolingParameter pooling_param = 121; + optional PowerParameter power_param = 122; + optional PReLUParameter prelu_param = 131; + optional PythonParameter python_param = 130; + optional RecurrentParameter recurrent_param = 146; + optional ReductionParameter reduction_param = 136; + optional ReLUParameter relu_param = 123; + optional ReshapeParameter reshape_param = 133; + optional ScaleParameter scale_param = 142; + optional SigmoidParameter sigmoid_param = 124; + optional SoftmaxParameter softmax_param = 125; + optional SPPParameter spp_param = 132; + optional SliceParameter slice_param = 126; + optional TanHParameter tanh_param = 127; + optional ThresholdParameter threshold_param = 128; + optional TileParameter tile_param = 138; + optional WindowDataParameter window_data_param = 129; +} + +// Message that stores parameters used to apply transformation +// to the data layer's data +message TransformationParameter { + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 1 [default = 1]; + // Specify if we want to randomly mirror data. + optional bool mirror = 2 [default = false]; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 3 [default = 0]; + // mean_file and mean_value cannot be specified at the same time + optional string mean_file = 4; + // if specified can be repeated once (would subtract it from all the channels) + // or can be repeated the same number of times as channels + // (would subtract them from the corresponding channel) + repeated float mean_value = 5; + // Force the decoded image to have 3 color channels. + optional bool force_color = 6 [default = false]; + // Force the decoded image to have 1 color channels. + optional bool force_gray = 7 [default = false]; +} + +// Message that stores parameters shared by loss layers +message LossParameter { + // If specified, ignore instances with the given label. 
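+  // (Illustrative: semantic-segmentation setups commonly set ignore_label: 255
+  // so that unlabeled "void" pixels are excluded from the loss.)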
+ optional int32 ignore_label = 1; + // How to normalize the loss for loss layers that aggregate across batches, + // spatial dimensions, or other dimensions. Currently only implemented in + // SoftmaxWithLoss and SigmoidCrossEntropyLoss layers. + enum NormalizationMode { + // Divide by the number of examples in the batch times spatial dimensions. + // Outputs that receive the ignore label will NOT be ignored in computing + // the normalization factor. + FULL = 0; + // Divide by the total number of output locations that do not take the + // ignore_label. If ignore_label is not set, this behaves like FULL. + VALID = 1; + // Divide by the batch size. + BATCH_SIZE = 2; + // Do not normalize the loss. + NONE = 3; + } + // For historical reasons, the default normalization for + // SigmoidCrossEntropyLoss is BATCH_SIZE and *not* VALID. + optional NormalizationMode normalization = 3 [default = VALID]; + // Deprecated. Ignored if normalization is specified. If normalization + // is not specified, then setting this to false will be equivalent to + // normalization = BATCH_SIZE to be consistent with previous behavior. + optional bool normalize = 2; +} + +// Messages that store parameters used by individual layer types follow, in +// alphabetical order. + +message AccuracyParameter { + // When computing accuracy, count as correct by comparing the true label to + // the top k scoring classes. By default, only compare to the top scoring + // class (i.e. argmax). + optional uint32 top_k = 1 [default = 1]; + + // The "label" axis of the prediction blob, whose argmax corresponds to the + // predicted label -- may be negative to index from the end (e.g., -1 for the + // last axis). For example, if axis == 1 and the predictions are + // (N x C x H x W), the label blob is expected to contain N*H*W ground truth + // labels with integer values in {0, 1, ..., C-1}. + optional int32 axis = 2 [default = 1]; + + // If specified, ignore instances with the given label. + optional int32 ignore_label = 3; +} + +message ArgMaxParameter { + // If true produce pairs (argmax, maxval) + optional bool out_max_val = 1 [default = false]; + optional uint32 top_k = 2 [default = 1]; + // The axis along which to maximise -- may be negative to index from the + // end (e.g., -1 for the last axis). + // By default ArgMaxLayer maximizes over the flattened trailing dimensions + // for each index of the first / num dimension. + optional int32 axis = 3; +} + +message ConcatParameter { + // The axis along which to concatenate -- may be negative to index from the + // end (e.g., -1 for the last axis). Other axes must have the + // same dimension for all the bottom blobs. + // By default, ConcatLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 2 [default = 1]; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 concat_dim = 1 [default = 1]; +} + +message BatchNormParameter { + // If false, accumulate global mean/variance values via a moving average. If + // true, use those accumulated values instead of computing mean/variance + // across the batch. + optional bool use_global_stats = 1; + // How much does the moving average decay each iteration? + optional float moving_average_fraction = 2 [default = .999]; + // Small value to add to the variance estimate so that we don't divide by + // zero. 
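+  // (Illustrative: the normalized output is y = (x - mean) / sqrt(var + eps),
+  // so eps keeps the denominator bounded away from zero.)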
+ optional float eps = 3 [default = 1e-5]; +} + +message BiasParameter { + // The first axis of bottom[0] (the first input Blob) along which to apply + // bottom[1] (the second input Blob). May be negative to index from the end + // (e.g., -1 for the last axis). + // + // For example, if bottom[0] is 4D with shape 100x3x40x60, the output + // top[0] will have the same shape, and bottom[1] may have any of the + // following shapes (for the given value of axis): + // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 + // (axis == 1 == -3) 3; 3x40; 3x40x60 + // (axis == 2 == -2) 40; 40x60 + // (axis == 3 == -1) 60 + // Furthermore, bottom[1] may have the empty shape (regardless of the value of + // "axis") -- a scalar bias. + optional int32 axis = 1 [default = 1]; + + // (num_axes is ignored unless just one bottom is given and the bias is + // a learned parameter of the layer. Otherwise, num_axes is determined by the + // number of axes by the second bottom.) + // The number of axes of the input (bottom[0]) covered by the bias + // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. + // Set num_axes := 0, to add a zero-axis Blob: a scalar. + optional int32 num_axes = 2 [default = 1]; + + // (filler is ignored unless just one bottom is given and the bias is + // a learned parameter of the layer.) + // The initialization for the learned bias parameter. + // Default is the zero (0) initialization, resulting in the BiasLayer + // initially performing the identity operation. + optional FillerParameter filler = 3; +} + +message ContrastiveLossParameter { + // margin for dissimilar pair + optional float margin = 1 [default = 1.0]; + // The first implementation of this cost did not exactly match the cost of + // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2. + // legacy_version = false (the default) uses (margin - d)^2 as proposed in the + // Hadsell paper. New models should probably use this version. + // legacy_version = true uses (margin - d^2). This is kept to support / + // reproduce existing models and results + optional bool legacy_version = 2 [default = false]; +} + +message ConvolutionParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in all spatial dimensions, or once per spatial dimension. + repeated uint32 pad = 3; // The padding size; defaults to 0 + repeated uint32 kernel_size = 4; // The kernel size + repeated uint32 stride = 6; // The stride; defaults to 1 + // Factor used to dilate the kernel, (implicitly) zero-filling the resulting + // holes. (Kernel dilation is sometimes referred to by its use in the + // algorithme à trous from Holschneider et al. 1987.) + repeated uint32 dilation = 18; // The dilation; defaults to 1 + + // For 2D convolution only, the *_h and *_w versions may also be used to + // specify both spatial dimensions. 
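+  // (Illustrative: kernel_h: 3, kernel_w: 5 with pad_h: 1, pad_w: 2 preserves
+  // the input's spatial size at stride 1, since (H + 2*1 - 3) + 1 = H and
+  // (W + 2*2 - 5) + 1 = W.)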
+ optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only) + optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only) + optional uint32 kernel_h = 11; // The kernel height (2D only) + optional uint32 kernel_w = 12; // The kernel width (2D only) + optional uint32 stride_h = 13; // The stride height (2D only) + optional uint32 stride_w = 14; // The stride width (2D only) + + optional uint32 group = 5 [default = 1]; // The group size for group conv + + optional FillerParameter weight_filler = 7; // The filler for the weight + optional FillerParameter bias_filler = 8; // The filler for the bias + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 15 [default = DEFAULT]; + + // The axis to interpret as "channels" when performing convolution. + // Preceding dimensions are treated as independent inputs; + // succeeding dimensions are treated as "spatial". + // With (N, C, H, W) inputs, and axis == 1 (the default), we perform + // N independent 2D convolutions, sliding C-channel (or (C/g)-channels, for + // groups g>1) filters across the spatial axes (H, W) of the input. + // With (N, C, D, H, W) inputs, and axis == 1, we perform + // N independent 3D convolutions, sliding (C/g)-channels + // filters across the spatial axes (D, H, W) of the input. + optional int32 axis = 16 [default = 1]; + + // Whether to force use of the general ND convolution, even if a specific + // implementation for blobs of the appropriate number of spatial dimensions + // is available. (Currently, there is only a 2D-specific convolution + // implementation; for input blobs with num_axes != 2, this option is + // ignored and the ND implementation will be used.) + optional bool force_nd_im2col = 17 [default = false]; +} + +message CropParameter { + // To crop, elements of the first bottom are selected to fit the dimensions + // of the second, reference bottom. The crop is configured by + // - the crop `axis` to pick the dimensions for cropping + // - the crop `offset` to set the shift for all/each dimension + // to align the cropped bottom with the reference bottom. + // All dimensions up to but excluding `axis` are preserved, while + // the dimensions including and trailing `axis` are cropped. + // If only one `offset` is set, then all dimensions are offset by this amount. + // Otherwise, the number of offsets must equal the number of cropped axes to + // shift the crop in each dimension accordingly. + // Note: standard dimensions are N,C,H,W so the default is a spatial crop, + // and `axis` may be negative to index from the end (e.g., -1 for the last + // axis). + optional int32 axis = 1 [default = 2]; + repeated uint32 offset = 2; +} + +message DataParameter { + enum DB { + LEVELDB = 0; + LMDB = 1; + } + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + // DEPRECATED. Each solver accesses a different subset of the database. + optional uint32 rand_skip = 7 [default = 0]; + optional DB backend = 8 [default = LEVELDB]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. 
Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + // Force the encoded image to have 3 color channels + optional bool force_encoded_color = 9 [default = false]; + // Prefetch queue (Increase if data feeding bandwidth varies, within the + // limit of device memory for GPU training) + optional uint32 prefetch = 10 [default = 4]; +} + +message DropoutParameter { + optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio +} + +// DummyDataLayer fills any number of arbitrarily shaped blobs with random +// (or constant) data generated by "Fillers" (see "message FillerParameter"). +message DummyDataParameter { + // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N + // shape fields, and 0, 1 or N data_fillers. + // + // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used. + // If 1 data_filler is specified, it is applied to all top blobs. If N are + // specified, the ith is applied to the ith top blob. + repeated FillerParameter data_filler = 1; + repeated BlobShape shape = 6; + + // 4D dimensions -- deprecated. Use "shape" instead. + repeated uint32 num = 2; + repeated uint32 channels = 3; + repeated uint32 height = 4; + repeated uint32 width = 5; +} + +message EltwiseParameter { + enum EltwiseOp { + PROD = 0; + SUM = 1; + MAX = 2; + } + optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation + repeated float coeff = 2; // blob-wise coefficient for SUM operation + + // Whether to use an asymptotically slower (for >2 inputs) but stabler method + // of computing the gradient for the PROD operation. (No effect for SUM op.) + optional bool stable_prod_grad = 3 [default = true]; +} + +// Message that stores parameters used by ELULayer +message ELUParameter { + // Described in: + // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate + // Deep Network Learning by Exponential Linear Units (ELUs). arXiv + optional float alpha = 1 [default = 1]; +} + +// Message that stores parameters used by EmbedLayer +message EmbedParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + // The input is given as integers to be interpreted as one-hot + // vector indices with dimension num_input. Hence num_input should be + // 1 greater than the maximum possible input value. + optional uint32 input_dim = 2; + + optional bool bias_term = 3 [default = true]; // Whether to use a bias term + optional FillerParameter weight_filler = 4; // The filler for the weight + optional FillerParameter bias_filler = 5; // The filler for the bias + +} + +// Message that stores parameters used by ExpLayer +message ExpParameter { + // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = exp(shift + scale * x). + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +/// Message that stores parameters used by FlattenLayer +message FlattenParameter { + // The first axis to flatten: all preceding axes are retained in the output. 
+ // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 1 [default = 1]; + + // The last axis to flatten: all following axes are retained in the output. + // May be negative to index from the end (e.g., the default -1 for the last + // axis). + optional int32 end_axis = 2 [default = -1]; +} + +// Message that stores parameters used by HDF5DataLayer +message HDF5DataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 2; + + // Specify whether to shuffle the data. + // If shuffle == true, the ordering of the HDF5 files is shuffled, + // and the ordering of data within any given HDF5 file is shuffled, + // but data between different files are not interleaved; all of a file's + // data are output (in a random order) before moving onto another file. + optional bool shuffle = 3 [default = false]; +} + +message HDF5OutputParameter { + optional string file_name = 1; +} + +message HingeLossParameter { + enum Norm { + L1 = 1; + L2 = 2; + } + // Specify the Norm to use L1 or L2 + optional Norm norm = 1 [default = L1]; +} + +message ImageDataParameter { + // Specify the data source. + optional string source = 1; + // Specify the batch size. + optional uint32 batch_size = 4 [default = 1]; + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. + optional uint32 rand_skip = 7 [default = 0]; + // Whether or not ImageLayer should shuffle the list of files at every epoch. + optional bool shuffle = 8 [default = false]; + // It will also resize images if new_height or new_width are not zero. + optional uint32 new_height = 9 [default = 0]; + optional uint32 new_width = 10 [default = 0]; + // Specify if the images are color or gray + optional bool is_color = 11 [default = true]; + // DEPRECATED. See TransformationParameter. For data pre-processing, we can do + // simple scaling and subtracting the data mean, if provided. Note that the + // mean subtraction is always carried out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // DEPRECATED. See TransformationParameter. Specify if we would like to randomly + // crop an image. + optional uint32 crop_size = 5 [default = 0]; + // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror + // data. + optional bool mirror = 6 [default = false]; + optional string root_folder = 12 [default = ""]; +} + +message InfogainLossParameter { + // Specify the infogain matrix source. + optional string source = 1; +} + +message InnerProductParameter { + optional uint32 num_output = 1; // The number of outputs for the layer + optional bool bias_term = 2 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 3; // The filler for the weight + optional FillerParameter bias_filler = 4; // The filler for the bias + + // The first axis to be lumped into a single inner product computation; + // all preceding axes are retained in the output. + // May be negative to index from the end (e.g., -1 for the last axis). + optional int32 axis = 5 [default = 1]; + // Specify whether to transpose the weight matrix or not. + // If transpose == true, any operations will be performed on the transpose + // of the weight matrix. 
The weight matrix itself is not going to be transposed + // but rather the transfer flag of operations will be toggled accordingly. + optional bool transpose = 6 [default = false]; +} + +message InputParameter { + // This layer produces N >= 1 top blob(s) to be assigned manually. + // Define N shapes to set a shape for each top. + // Define 1 shape to set the same shape for every top. + // Define no shape to defer to reshaping manually. + repeated BlobShape shape = 1; +} + +// Message that stores parameters used by LogLayer +message LogParameter { + // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0. + // Or if base is set to the default (-1), base is set to e, + // so y = ln(shift + scale * x) = log_e(shift + scale * x) + optional float base = 1 [default = -1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +// Message that stores parameters used by LRNLayer +message LRNParameter { + optional uint32 local_size = 1 [default = 5]; + optional float alpha = 2 [default = 1.]; + optional float beta = 3 [default = 0.75]; + enum NormRegion { + ACROSS_CHANNELS = 0; + WITHIN_CHANNEL = 1; + } + optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS]; + optional float k = 5 [default = 1.]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + +message MemoryDataParameter { + optional uint32 batch_size = 1; + optional uint32 channels = 2; + optional uint32 height = 3; + optional uint32 width = 4; +} + +message MVNParameter { + // This parameter can be set to false to normalize mean only + optional bool normalize_variance = 1 [default = true]; + + // This parameter can be set to true to perform DNN-like MVN + optional bool across_channels = 2 [default = false]; + + // Epsilon for not dividing by zero while normalizing variance + optional float eps = 3 [default = 1e-9]; +} + +message ParameterParameter { + optional BlobShape shape = 1; +} + +message PoolingParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 1 [default = MAX]; // The pooling method + // Pad, kernel size, and stride are all given as a single value for equal + // dimensions in height and width or as Y, X pairs. + optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X) + optional uint32 pad_h = 9 [default = 0]; // The padding height + optional uint32 pad_w = 10 [default = 0]; // The padding width + optional uint32 kernel_size = 2; // The kernel size (square) + optional uint32 kernel_h = 5; // The kernel height + optional uint32 kernel_w = 6; // The kernel width + optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X) + optional uint32 stride_h = 7; // The stride height + optional uint32 stride_w = 8; // The stride width + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 11 [default = DEFAULT]; + // If global_pooling then it will pool over the size of the bottom by doing + // kernel_h = bottom->height and kernel_w = bottom->width + optional bool global_pooling = 12 [default = false]; +} + +message PowerParameter { + // PowerLayer computes outputs y = (shift + scale * x) ^ power. 
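+  // (Illustrative: shift: 1, scale: 2, power: 2 maps an input x = 3 to
+  // y = (1 + 2 * 3) ^ 2 = 49.)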
+ optional float power = 1 [default = 1.0]; + optional float scale = 2 [default = 1.0]; + optional float shift = 3 [default = 0.0]; +} + +message PythonParameter { + optional string module = 1; + optional string layer = 2; + // This value is set to the attribute `param_str` of the `PythonLayer` object + // in Python before calling the `setup()` method. This could be a number, + // string, dictionary in Python dict format, JSON, etc. You may parse this + // string in `setup` method and use it in `forward` and `backward`. + optional string param_str = 3 [default = '']; + // Whether this PythonLayer is shared among worker solvers during data parallelism. + // If true, each worker solver sequentially run forward from this layer. + // This value should be set true if you are using it as a data layer. + optional bool share_in_parallel = 4 [default = false]; +} + +// Message that stores parameters used by RecurrentLayer +message RecurrentParameter { + // The dimension of the output (and usually hidden state) representation -- + // must be explicitly set to non-zero. + optional uint32 num_output = 1 [default = 0]; + + optional FillerParameter weight_filler = 2; // The filler for the weight + optional FillerParameter bias_filler = 3; // The filler for the bias + + // Whether to enable displaying debug_info in the unrolled recurrent net. + optional bool debug_info = 4 [default = false]; + + // Whether to add as additional inputs (bottoms) the initial hidden state + // blobs, and add as additional outputs (tops) the final timestep hidden state + // blobs. The number of additional bottom/top blobs required depends on the + // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs. + optional bool expose_hidden = 5 [default = false]; +} + +// Message that stores parameters used by ReductionLayer +message ReductionParameter { + enum ReductionOp { + SUM = 1; + ASUM = 2; + SUMSQ = 3; + MEAN = 4; + } + + optional ReductionOp operation = 1 [default = SUM]; // reduction operation + + // The first axis to reduce to a scalar -- may be negative to index from the + // end (e.g., -1 for the last axis). + // (Currently, only reduction along ALL "tail" axes is supported; reduction + // of axis M through N, where N < num_axes - 1, is unsupported.) + // Suppose we have an n-axis bottom Blob with shape: + // (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)). + // If axis == m, the output Blob will have shape + // (d0, d1, d2, ..., d(m-1)), + // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1)) + // times, each including (dm * d(m+1) * ... * d(n-1)) individual data. + // If axis == 0 (the default), the output Blob always has the empty shape + // (count 1), performing reduction across the entire input -- + // often useful for creating new loss functions. + optional int32 axis = 2 [default = 0]; + + optional float coeff = 3 [default = 1.0]; // coefficient for output +} + +// Message that stores parameters used by ReLULayer +message ReLUParameter { + // Allow non-zero slope for negative inputs to speed up optimization + // Described in: + // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities + // improve neural network acoustic models. In ICML Workshop on Deep Learning + // for Audio, Speech, and Language Processing. + optional float negative_slope = 1 [default = 0]; + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 2 [default = DEFAULT]; +} + +message ReshapeParameter { + // Specify the output dimensions. 
If some of the dimensions are set to 0, + // the corresponding dimension from the bottom layer is used (unchanged). + // Exactly one dimension may be set to -1, in which case its value is + // inferred from the count of the bottom blob and the remaining dimensions. + // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8: + // + // layer { + // type: "Reshape" bottom: "input" top: "output" + // reshape_param { ... } + // } + // + // If "input" is 2D with shape 2 x 8, then the following reshape_param + // specifications are all equivalent, producing a 3D blob "output" with shape + // 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 0 dim: 2 dim: -1 } } + // reshape_param { shape { dim: 0 dim:-1 dim: 4 } } + // + optional BlobShape shape = 1; + + // axis and num_axes control the portion of the bottom blob's shape that are + // replaced by (included in) the reshape. By default (axis == 0 and + // num_axes == -1), the entire bottom blob shape is included in the reshape, + // and hence the shape field must specify the entire output shape. + // + // axis may be non-zero to retain some portion of the beginning of the input + // shape (and may be negative to index from the end; e.g., -1 to begin the + // reshape after the last axis, including nothing in the reshape, + // -2 to include only the last axis, etc.). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are all equivalent, + // producing a blob "output" with shape 2 x 2 x 4: + // + // reshape_param { shape { dim: 2 dim: 2 dim: 4 } } + // reshape_param { shape { dim: 2 dim: 4 } axis: 1 } + // reshape_param { shape { dim: 2 dim: 4 } axis: -3 } + // + // num_axes specifies the extent of the reshape. + // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on + // input axes in the range [axis, axis+num_axes]. + // num_axes may also be -1, the default, to include all remaining axes + // (starting from axis). + // + // For example, suppose "input" is a 2D blob with shape 2 x 8. + // Then the following ReshapeLayer specifications are equivalent, + // producing a blob "output" with shape 1 x 2 x 8. + // + // reshape_param { shape { dim: 1 dim: 2 dim: 8 } } + // reshape_param { shape { dim: 1 dim: 2 } num_axes: 1 } + // reshape_param { shape { dim: 1 } num_axes: 0 } + // + // On the other hand, these would produce output blob shape 2 x 1 x 8: + // + // reshape_param { shape { dim: 2 dim: 1 dim: 8 } } + // reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 } + // + optional int32 axis = 2 [default = 0]; + optional int32 num_axes = 3 [default = -1]; +} + +message ScaleParameter { + // The first axis of bottom[0] (the first input Blob) along which to apply + // bottom[1] (the second input Blob). May be negative to index from the end + // (e.g., -1 for the last axis). + // + // For example, if bottom[0] is 4D with shape 100x3x40x60, the output + // top[0] will have the same shape, and bottom[1] may have any of the + // following shapes (for the given value of axis): + // (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60 + // (axis == 1 == -3) 3; 3x40; 3x40x60 + // (axis == 2 == -2) 40; 40x60 + // (axis == 3 == -1) 60 + // Furthermore, bottom[1] may have the empty shape (regardless of the value of + // "axis") -- a scalar multiplier. 
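+  // (Illustrative: in practice a ScaleLayer with bias_term: true often follows
+  // a BatchNorm layer, since Caffe's BatchNorm does not learn a scale or shift
+  // of its own.)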
+ optional int32 axis = 1 [default = 1]; + + // (num_axes is ignored unless just one bottom is given and the scale is + // a learned parameter of the layer. Otherwise, num_axes is determined by the + // number of axes by the second bottom.) + // The number of axes of the input (bottom[0]) covered by the scale + // parameter, or -1 to cover all axes of bottom[0] starting from `axis`. + // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar. + optional int32 num_axes = 2 [default = 1]; + + // (filler is ignored unless just one bottom is given and the scale is + // a learned parameter of the layer.) + // The initialization for the learned scale parameter. + // Default is the unit (1) initialization, resulting in the ScaleLayer + // initially performing the identity operation. + optional FillerParameter filler = 3; + + // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but + // may be more efficient). Initialized with bias_filler (defaults to 0). + optional bool bias_term = 4 [default = false]; + optional FillerParameter bias_filler = 5; +} + +message SigmoidParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +message SliceParameter { + // The axis along which to slice -- may be negative to index from the end + // (e.g., -1 for the last axis). + // By default, SliceLayer concatenates blobs along the "channels" axis (1). + optional int32 axis = 3 [default = 1]; + repeated uint32 slice_point = 2; + + // DEPRECATED: alias for "axis" -- does not support negative indexing. + optional uint32 slice_dim = 1 [default = 1]; +} + +// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer +message SoftmaxParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; + + // The axis along which to perform the softmax -- may be negative to index + // from the end (e.g., -1 for the last axis). + // Any other axes will be evaluated as independent softmaxes. + optional int32 axis = 2 [default = 1]; +} + +message TanHParameter { + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 1 [default = DEFAULT]; +} + +// Message that stores parameters used by TileLayer +message TileParameter { + // The index of the axis to tile. + optional int32 axis = 1 [default = 1]; + + // The number of copies (tiles) of the blob to output. + optional int32 tiles = 2; +} + +// Message that stores parameters used by ThresholdLayer +message ThresholdParameter { + optional float threshold = 1 [default = 0]; // Strictly positive values +} + +message WindowDataParameter { + // Specify the data source. + optional string source = 1; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 2 [default = 1]; + optional string mean_file = 3; + // Specify the batch size. + optional uint32 batch_size = 4; + // Specify if we would like to randomly crop an image. + optional uint32 crop_size = 5 [default = 0]; + // Specify if we want to randomly mirror data. 
+ optional bool mirror = 6 [default = false]; + // Foreground (object) overlap threshold + optional float fg_threshold = 7 [default = 0.5]; + // Background (non-object) overlap threshold + optional float bg_threshold = 8 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float fg_fraction = 9 [default = 0.25]; + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 context_pad = 10 [default = 0]; + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string crop_mode = 11 [default = "warp"]; + // cache_images: will load all images in memory for faster access + optional bool cache_images = 12 [default = false]; + // append root_folder to locate images + optional string root_folder = 13 [default = ""]; +} + +message SPPParameter { + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional uint32 pyramid_height = 1; + optional PoolMethod pool = 2 [default = MAX]; // The pooling method + enum Engine { + DEFAULT = 0; + CAFFE = 1; + CUDNN = 2; + } + optional Engine engine = 6 [default = DEFAULT]; +} + +// DEPRECATED: use LayerParameter. +message V1LayerParameter { + repeated string bottom = 2; + repeated string top = 3; + optional string name = 4; + repeated NetStateRule include = 32; + repeated NetStateRule exclude = 33; + enum LayerType { + NONE = 0; + ABSVAL = 35; + ACCURACY = 1; + ARGMAX = 30; + BNLL = 2; + CONCAT = 3; + CONTRASTIVE_LOSS = 37; + CONVOLUTION = 4; + DATA = 5; + DECONVOLUTION = 39; + DROPOUT = 6; + DUMMY_DATA = 32; + EUCLIDEAN_LOSS = 7; + ELTWISE = 25; + EXP = 38; + FLATTEN = 8; + HDF5_DATA = 9; + HDF5_OUTPUT = 10; + HINGE_LOSS = 28; + IM2COL = 11; + IMAGE_DATA = 12; + INFOGAIN_LOSS = 13; + INNER_PRODUCT = 14; + LRN = 15; + MEMORY_DATA = 29; + MULTINOMIAL_LOGISTIC_LOSS = 16; + MVN = 34; + POOLING = 17; + POWER = 26; + RELU = 18; + SIGMOID = 19; + SIGMOID_CROSS_ENTROPY_LOSS = 27; + SILENCE = 36; + SOFTMAX = 20; + SOFTMAX_LOSS = 21; + SPLIT = 22; + SLICE = 33; + TANH = 23; + WINDOW_DATA = 24; + THRESHOLD = 31; + } + optional LayerType type = 5; + repeated BlobProto blobs = 6; + repeated string param = 1001; + repeated DimCheckMode blob_share_mode = 1002; + enum DimCheckMode { + STRICT = 0; + PERMISSIVE = 1; + } + repeated float blobs_lr = 7; + repeated float weight_decay = 8; + repeated float loss_weight = 35; + optional AccuracyParameter accuracy_param = 27; + optional ArgMaxParameter argmax_param = 23; + optional ConcatParameter concat_param = 9; + optional ContrastiveLossParameter contrastive_loss_param = 40; + optional ConvolutionParameter convolution_param = 10; + optional DataParameter data_param = 11; + optional DropoutParameter dropout_param = 12; + optional DummyDataParameter dummy_data_param = 26; + optional EltwiseParameter eltwise_param = 24; + optional ExpParameter exp_param = 41; + optional HDF5DataParameter hdf5_data_param = 13; + optional HDF5OutputParameter hdf5_output_param = 14; + optional HingeLossParameter hinge_loss_param = 29; + optional ImageDataParameter image_data_param = 15; + optional InfogainLossParameter infogain_loss_param = 16; + optional InnerProductParameter inner_product_param = 17; + optional LRNParameter lrn_param = 18; + optional MemoryDataParameter memory_data_param = 22; + optional MVNParameter mvn_param = 34; + optional PoolingParameter pooling_param = 19; + optional PowerParameter power_param = 21; 
+ optional ReLUParameter relu_param = 30; + optional SigmoidParameter sigmoid_param = 38; + optional SoftmaxParameter softmax_param = 39; + optional SliceParameter slice_param = 31; + optional TanHParameter tanh_param = 37; + optional ThresholdParameter threshold_param = 25; + optional WindowDataParameter window_data_param = 20; + optional TransformationParameter transform_param = 36; + optional LossParameter loss_param = 42; + optional V0LayerParameter layer = 1; +} + +// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters +// in Caffe. We keep this message type around for legacy support. +message V0LayerParameter { + optional string name = 1; // the layer name + optional string type = 2; // the string to specify the layer type + + // Parameters to specify layers with inner products. + optional uint32 num_output = 3; // The number of outputs for the layer + optional bool biasterm = 4 [default = true]; // whether to have bias terms + optional FillerParameter weight_filler = 5; // The filler for the weight + optional FillerParameter bias_filler = 6; // The filler for the bias + + optional uint32 pad = 7 [default = 0]; // The padding size + optional uint32 kernelsize = 8; // The kernel size + optional uint32 group = 9 [default = 1]; // The group size for group conv + optional uint32 stride = 10 [default = 1]; // The stride + enum PoolMethod { + MAX = 0; + AVE = 1; + STOCHASTIC = 2; + } + optional PoolMethod pool = 11 [default = MAX]; // The pooling method + optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio + + optional uint32 local_size = 13 [default = 5]; // for local response norm + optional float alpha = 14 [default = 1.]; // for local response norm + optional float beta = 15 [default = 0.75]; // for local response norm + optional float k = 22 [default = 1.]; + + // For data layers, specify the data source + optional string source = 16; + // For data pre-processing, we can do simple scaling and subtracting the + // data mean, if provided. Note that the mean subtraction is always carried + // out before scaling. + optional float scale = 17 [default = 1]; + optional string meanfile = 18; + // For data layers, specify the batch size. + optional uint32 batchsize = 19; + // For data layers, specify if we would like to randomly crop an image. + optional uint32 cropsize = 20 [default = 0]; + // For data layers, specify if we want to randomly mirror data. + optional bool mirror = 21 [default = false]; + + // The blobs containing the numeric parameters of the layer + repeated BlobProto blobs = 50; + // The ratio that is multiplied on the global learning rate. If you want to + // set the learning ratio for one blob, you need to set it for all blobs. + repeated float blobs_lr = 51; + // The weight decay that is multiplied on the global weight decay. + repeated float weight_decay = 52; + + // The rand_skip variable is for the data layer to skip a few data points + // to avoid all asynchronous sgd clients to start at the same point. The skip + // point would be set as rand_skip * rand(0,1). Note that rand_skip should not + // be larger than the number of keys in the database. 
+ optional uint32 rand_skip = 53 [default = 0]; + + // Fields related to detection (det_*) + // foreground (object) overlap threshold + optional float det_fg_threshold = 54 [default = 0.5]; + // background (non-object) overlap threshold + optional float det_bg_threshold = 55 [default = 0.5]; + // Fraction of batch that should be foreground objects + optional float det_fg_fraction = 56 [default = 0.25]; + + // optional bool OBSOLETE_can_clobber = 57 [default = true]; + + // Amount of contextual padding to add around a window + // (used only by the window_data_layer) + optional uint32 det_context_pad = 58 [default = 0]; + + // Mode for cropping out a detection window + // warp: cropped window is warped to a fixed size and aspect ratio + // square: the tightest square around the window is cropped + optional string det_crop_mode = 59 [default = "warp"]; + + // For ReshapeLayer, one needs to specify the new dimensions. + optional int32 new_num = 60 [default = 0]; + optional int32 new_channels = 61 [default = 0]; + optional int32 new_height = 62 [default = 0]; + optional int32 new_width = 63 [default = 0]; + + // Whether or not ImageLayer should shuffle the list of files at every epoch. + // It will also resize images if new_height or new_width are not zero. + optional bool shuffle_images = 64 [default = false]; + + // For ConcatLayer, one needs to specify the dimension for concatenation, and + // the other dimensions must be the same for all the bottom blobs. + // By default it will concatenate blobs along the channels dimension. + optional uint32 concat_dim = 65 [default = 1]; + + optional HDF5OutputParameter hdf5_output_param = 1001; +} + +message PReLUParameter { + // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers: + // Surpassing Human-Level Performance on ImageNet Classification, 2015. + + // Initial value of a_i. Default is a_i=0.25 for all i. + optional FillerParameter filler = 1; + // Whether or not slope parameters are shared across channels. + optional bool channel_shared = 2 [default = false]; +} diff --git a/kaffe/caffe/caffepb.py b/kaffe/caffe/caffepb.py index 56233d1..99f768a 100644 --- a/kaffe/caffe/caffepb.py +++ b/kaffe/caffe/caffepb.py @@ -1,20 +1,28 @@ # Generated by the protocol buffer compiler. DO NOT EDIT! 
# source: caffe.proto +import sys +_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf.internal import enum_type_wrapper from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection +from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) +_sym_db = _symbol_database.Default() + DESCRIPTOR = _descriptor.FileDescriptor( name='caffe.proto', package='caffe', - serialized_pb='\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\x9c\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 \x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! 
\x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x11\n\trms_decay\x18& \x01(\x02\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x98\x13\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 
\x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 
\x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 \x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 
\x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 \x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 
\x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 
\x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! \x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' 
\x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 \x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= 
\x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? \x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01') + syntax='proto2', + serialized_pb=_b('\n\x0b\x63\x61\x66\x66\x65.proto\x12\x05\x63\x61\x66\x66\x65\"\x1c\n\tBlobShape\x12\x0f\n\x03\x64im\x18\x01 \x03(\x03\x42\x02\x10\x01\"\xcc\x01\n\tBlobProto\x12\x1f\n\x05shape\x18\x07 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x10\n\x04\x64\x61ta\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x10\n\x04\x64iff\x18\x06 \x03(\x02\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_data\x18\x08 \x03(\x01\x42\x02\x10\x01\x12\x17\n\x0b\x64ouble_diff\x18\t \x03(\x01\x42\x02\x10\x01\x12\x0e\n\x03num\x18\x01 \x01(\x05:\x01\x30\x12\x13\n\x08\x63hannels\x18\x02 \x01(\x05:\x01\x30\x12\x11\n\x06height\x18\x03 \x01(\x05:\x01\x30\x12\x10\n\x05width\x18\x04 \x01(\x05:\x01\x30\"2\n\x0f\x42lobProtoVector\x12\x1f\n\x05\x62lobs\x18\x01 \x03(\x0b\x32\x10.caffe.BlobProto\"\x81\x01\n\x05\x44\x61tum\x12\x10\n\x08\x63hannels\x18\x01 \x01(\x05\x12\x0e\n\x06height\x18\x02 \x01(\x05\x12\r\n\x05width\x18\x03 \x01(\x05\x12\x0c\n\x04\x64\x61ta\x18\x04 \x01(\x0c\x12\r\n\x05label\x18\x05 \x01(\x05\x12\x12\n\nfloat_data\x18\x06 \x03(\x02\x12\x16\n\x07\x65ncoded\x18\x07 \x01(\x08:\x05\x66\x61lse\"\x8a\x02\n\x0f\x46illerParameter\x12\x16\n\x04type\x18\x01 \x01(\t:\x08\x63onstant\x12\x10\n\x05value\x18\x02 \x01(\x02:\x01\x30\x12\x0e\n\x03min\x18\x03 \x01(\x02:\x01\x30\x12\x0e\n\x03max\x18\x04 \x01(\x02:\x01\x31\x12\x0f\n\x04mean\x18\x05 \x01(\x02:\x01\x30\x12\x0e\n\x03std\x18\x06 \x01(\x02:\x01\x31\x12\x12\n\x06sparse\x18\x07 \x01(\x05:\x02-1\x12\x42\n\rvariance_norm\x18\x08 \x01(\x0e\x32#.caffe.FillerParameter.VarianceNorm:\x06\x46\x41N_IN\"4\n\x0cVarianceNorm\x12\n\n\x06\x46\x41N_IN\x10\x00\x12\x0b\n\x07\x46\x41N_OUT\x10\x01\x12\x0b\n\x07\x41VERAGE\x10\x02\"\x8e\x02\n\x0cNetParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\r\n\x05input\x18\x03 \x03(\t\x12%\n\x0binput_shape\x18\x08 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x11\n\tinput_dim\x18\x04 \x03(\x05\x12\x1d\n\x0e\x66orce_backward\x18\x05 \x01(\x08:\x05\x66\x61lse\x12\x1e\n\x05state\x18\x06 \x01(\x0b\x32\x0f.caffe.NetState\x12\x19\n\ndebug_info\x18\x07 \x01(\x08:\x05\x66\x61lse\x12$\n\x05layer\x18\x64 \x03(\x0b\x32\x15.caffe.LayerParameter\x12\'\n\x06layers\x18\x02 \x03(\x0b\x32\x17.caffe.V1LayerParameter\"\xc3\n\n\x0fSolverParameter\x12\x0b\n\x03net\x18\x18 \x01(\t\x12&\n\tnet_param\x18\x19 \x01(\x0b\x32\x13.caffe.NetParameter\x12\x11\n\ttrain_net\x18\x01 \x01(\t\x12\x10\n\x08test_net\x18\x02 \x03(\t\x12,\n\x0ftrain_net_param\x18\x15 \x01(\x0b\x32\x13.caffe.NetParameter\x12+\n\x0etest_net_param\x18\x16 \x03(\x0b\x32\x13.caffe.NetParameter\x12$\n\x0btrain_state\x18\x1a \x01(\x0b\x32\x0f.caffe.NetState\x12#\n\ntest_state\x18\x1b \x03(\x0b\x32\x0f.caffe.NetState\x12\x11\n\ttest_iter\x18\x03 \x03(\x05\x12\x18\n\rtest_interval\x18\x04 \x01(\x05:\x01\x30\x12 \n\x11test_compute_loss\x18\x13 \x01(\x08:\x05\x66\x61lse\x12!\n\x13test_initialization\x18 \x01(\x08:\x04true\x12\x0f\n\x07\x62\x61se_lr\x18\x05 \x01(\x02\x12\x0f\n\x07\x64isplay\x18\x06 
\x01(\x05\x12\x17\n\x0c\x61verage_loss\x18! \x01(\x05:\x01\x31\x12\x10\n\x08max_iter\x18\x07 \x01(\x05\x12\x14\n\titer_size\x18$ \x01(\x05:\x01\x31\x12\x11\n\tlr_policy\x18\x08 \x01(\t\x12\r\n\x05gamma\x18\t \x01(\x02\x12\r\n\x05power\x18\n \x01(\x02\x12\x10\n\x08momentum\x18\x0b \x01(\x02\x12\x14\n\x0cweight_decay\x18\x0c \x01(\x02\x12\x1f\n\x13regularization_type\x18\x1d \x01(\t:\x02L2\x12\x10\n\x08stepsize\x18\r \x01(\x05\x12\x11\n\tstepvalue\x18\" \x03(\x05\x12\x1a\n\x0e\x63lip_gradients\x18# \x01(\x02:\x02-1\x12\x13\n\x08snapshot\x18\x0e \x01(\x05:\x01\x30\x12\x17\n\x0fsnapshot_prefix\x18\x0f \x01(\t\x12\x1c\n\rsnapshot_diff\x18\x10 \x01(\x08:\x05\x66\x61lse\x12K\n\x0fsnapshot_format\x18% \x01(\x0e\x32%.caffe.SolverParameter.SnapshotFormat:\x0b\x42INARYPROTO\x12;\n\x0bsolver_mode\x18\x11 \x01(\x0e\x32!.caffe.SolverParameter.SolverMode:\x03GPU\x12\x14\n\tdevice_id\x18\x12 \x01(\x05:\x01\x30\x12\x17\n\x0brandom_seed\x18\x14 \x01(\x03:\x02-1\x12\x11\n\x04type\x18( \x01(\t:\x03SGD\x12\x14\n\x05\x64\x65lta\x18\x1f \x01(\x02:\x05\x31\x65-08\x12\x18\n\tmomentum2\x18\' \x01(\x02:\x05\x30.999\x12\x17\n\trms_decay\x18& \x01(\x02:\x04\x30.99\x12\x19\n\ndebug_info\x18\x17 \x01(\x08:\x05\x66\x61lse\x12\"\n\x14snapshot_after_train\x18\x1c \x01(\x08:\x04true\x12;\n\x0bsolver_type\x18\x1e \x01(\x0e\x32!.caffe.SolverParameter.SolverType:\x03SGD\x12\x1f\n\x11layer_wise_reduce\x18) \x01(\x08:\x04true\"+\n\x0eSnapshotFormat\x12\x08\n\x04HDF5\x10\x00\x12\x0f\n\x0b\x42INARYPROTO\x10\x01\"\x1e\n\nSolverMode\x12\x07\n\x03\x43PU\x10\x00\x12\x07\n\x03GPU\x10\x01\"U\n\nSolverType\x12\x07\n\x03SGD\x10\x00\x12\x0c\n\x08NESTEROV\x10\x01\x12\x0b\n\x07\x41\x44\x41GRAD\x10\x02\x12\x0b\n\x07RMSPROP\x10\x03\x12\x0c\n\x08\x41\x44\x41\x44\x45LTA\x10\x04\x12\x08\n\x04\x41\x44\x41M\x10\x05\"l\n\x0bSolverState\x12\x0c\n\x04iter\x18\x01 \x01(\x05\x12\x13\n\x0blearned_net\x18\x02 \x01(\t\x12!\n\x07history\x18\x03 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x17\n\x0c\x63urrent_step\x18\x04 \x01(\x05:\x01\x30\"N\n\x08NetState\x12!\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase:\x04TEST\x12\x10\n\x05level\x18\x02 \x01(\x05:\x01\x30\x12\r\n\x05stage\x18\x03 \x03(\t\"s\n\x0cNetStateRule\x12\x1b\n\x05phase\x18\x01 \x01(\x0e\x32\x0c.caffe.Phase\x12\x11\n\tmin_level\x18\x02 \x01(\x05\x12\x11\n\tmax_level\x18\x03 \x01(\x05\x12\r\n\x05stage\x18\x04 \x03(\t\x12\x11\n\tnot_stage\x18\x05 \x03(\t\"\xa3\x01\n\tParamSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x31\n\nshare_mode\x18\x02 \x01(\x0e\x32\x1d.caffe.ParamSpec.DimCheckMode\x12\x12\n\x07lr_mult\x18\x03 \x01(\x02:\x01\x31\x12\x15\n\ndecay_mult\x18\x04 \x01(\x02:\x01\x31\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\x82\x14\n\x0eLayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x0e\n\x06\x62ottom\x18\x03 \x03(\t\x12\x0b\n\x03top\x18\x04 \x03(\t\x12\x1b\n\x05phase\x18\n \x01(\x0e\x32\x0c.caffe.Phase\x12\x13\n\x0bloss_weight\x18\x05 \x03(\x02\x12\x1f\n\x05param\x18\x06 \x03(\x0b\x32\x10.caffe.ParamSpec\x12\x1f\n\x05\x62lobs\x18\x07 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x16\n\x0epropagate_down\x18\x0b \x03(\x08\x12$\n\x07include\x18\x08 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18\t \x03(\x0b\x32\x13.caffe.NetStateRule\x12\x37\n\x0ftransform_param\x18\x64 \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18\x65 \x01(\x0b\x32\x14.caffe.LossParameter\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x66 \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18g 
\x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12\x34\n\x10\x62\x61tch_norm_param\x18\x8b\x01 \x01(\x0b\x32\x19.caffe.BatchNormParameter\x12)\n\nbias_param\x18\x8d\x01 \x01(\x0b\x32\x14.caffe.BiasParameter\x12,\n\x0c\x63oncat_param\x18h \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18i \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18j \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12)\n\ncrop_param\x18\x90\x01 \x01(\x0b\x32\x14.caffe.CropParameter\x12(\n\ndata_param\x18k \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18l \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18m \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18n \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12\'\n\telu_param\x18\x8c\x01 \x01(\x0b\x32\x13.caffe.ELUParameter\x12+\n\x0b\x65mbed_param\x18\x89\x01 \x01(\x0b\x32\x15.caffe.EmbedParameter\x12&\n\texp_param\x18o \x01(\x0b\x32\x13.caffe.ExpParameter\x12/\n\rflatten_param\x18\x87\x01 \x01(\x0b\x32\x17.caffe.FlattenParameter\x12\x31\n\x0fhdf5_data_param\x18p \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18q \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18r \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18s \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18t \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18u \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12+\n\x0binput_param\x18\x8f\x01 \x01(\x0b\x32\x15.caffe.InputParameter\x12\'\n\tlog_param\x18\x86\x01 \x01(\x0b\x32\x13.caffe.LogParameter\x12&\n\tlrn_param\x18v \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18w \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18x \x01(\x0b\x32\x13.caffe.MVNParameter\x12\x33\n\x0fparameter_param\x18\x91\x01 \x01(\x0b\x32\x19.caffe.ParameterParameter\x12.\n\rpooling_param\x18y \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18z \x01(\x0b\x32\x15.caffe.PowerParameter\x12+\n\x0bprelu_param\x18\x83\x01 \x01(\x0b\x32\x15.caffe.PReLUParameter\x12-\n\x0cpython_param\x18\x82\x01 \x01(\x0b\x32\x16.caffe.PythonParameter\x12\x33\n\x0frecurrent_param\x18\x92\x01 \x01(\x0b\x32\x19.caffe.RecurrentParameter\x12\x33\n\x0freduction_param\x18\x88\x01 \x01(\x0b\x32\x19.caffe.ReductionParameter\x12(\n\nrelu_param\x18{ \x01(\x0b\x32\x14.caffe.ReLUParameter\x12/\n\rreshape_param\x18\x85\x01 \x01(\x0b\x32\x17.caffe.ReshapeParameter\x12+\n\x0bscale_param\x18\x8e\x01 \x01(\x0b\x32\x15.caffe.ScaleParameter\x12.\n\rsigmoid_param\x18| \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18} \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12\'\n\tspp_param\x18\x84\x01 \x01(\x0b\x32\x13.caffe.SPPParameter\x12*\n\x0bslice_param\x18~ \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18\x7f \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x33\n\x0fthreshold_param\x18\x80\x01 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12)\n\ntile_param\x18\x8a\x01 \x01(\x0b\x32\x14.caffe.TileParameter\x12\x36\n\x11window_data_param\x18\x81\x01 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\"\xb6\x01\n\x17TransformationParameter\x12\x10\n\x05scale\x18\x01 \x01(\x02:\x01\x31\x12\x15\n\x06mirror\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x14\n\tcrop_size\x18\x03 \x01(\r:\x01\x30\x12\x11\n\tmean_file\x18\x04 \x01(\t\x12\x12\n\nmean_value\x18\x05 \x03(\x02\x12\x1a\n\x0b\x66orce_color\x18\x06 
\x01(\x08:\x05\x66\x61lse\x12\x19\n\nforce_gray\x18\x07 \x01(\x08:\x05\x66\x61lse\"\xc2\x01\n\rLossParameter\x12\x14\n\x0cignore_label\x18\x01 \x01(\x05\x12\x44\n\rnormalization\x18\x03 \x01(\x0e\x32&.caffe.LossParameter.NormalizationMode:\x05VALID\x12\x11\n\tnormalize\x18\x02 \x01(\x08\"B\n\x11NormalizationMode\x12\x08\n\x04\x46ULL\x10\x00\x12\t\n\x05VALID\x10\x01\x12\x0e\n\nBATCH_SIZE\x10\x02\x12\x08\n\x04NONE\x10\x03\"L\n\x11\x41\x63\x63uracyParameter\x12\x10\n\x05top_k\x18\x01 \x01(\r:\x01\x31\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x14\n\x0cignore_label\x18\x03 \x01(\x05\"M\n\x0f\x41rgMaxParameter\x12\x1a\n\x0bout_max_val\x18\x01 \x01(\x08:\x05\x66\x61lse\x12\x10\n\x05top_k\x18\x02 \x01(\r:\x01\x31\x12\x0c\n\x04\x61xis\x18\x03 \x01(\x05\"9\n\x0f\x43oncatParameter\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x31\x12\x15\n\nconcat_dim\x18\x01 \x01(\r:\x01\x31\"j\n\x12\x42\x61tchNormParameter\x12\x18\n\x10use_global_stats\x18\x01 \x01(\x08\x12&\n\x17moving_average_fraction\x18\x02 \x01(\x02:\x05\x30.999\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-05\"]\n\rBiasParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\"L\n\x18\x43ontrastiveLossParameter\x12\x11\n\x06margin\x18\x01 \x01(\x02:\x01\x31\x12\x1d\n\x0elegacy_version\x18\x02 \x01(\x08:\x05\x66\x61lse\"\xfc\x03\n\x14\x43onvolutionParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12\x0b\n\x03pad\x18\x03 \x03(\r\x12\x13\n\x0bkernel_size\x18\x04 \x03(\r\x12\x0e\n\x06stride\x18\x06 \x03(\r\x12\x10\n\x08\x64ilation\x18\x12 \x03(\r\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x10\n\x08kernel_h\x18\x0b \x01(\r\x12\x10\n\x08kernel_w\x18\x0c \x01(\r\x12\x10\n\x08stride_h\x18\r \x01(\r\x12\x10\n\x08stride_w\x18\x0e \x01(\r\x12\x10\n\x05group\x18\x05 \x01(\r:\x01\x31\x12-\n\rweight_filler\x18\x07 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x08 \x01(\x0b\x32\x16.caffe.FillerParameter\x12;\n\x06\x65ngine\x18\x0f \x01(\x0e\x32\".caffe.ConvolutionParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x10 \x01(\x05:\x01\x31\x12\x1e\n\x0f\x66orce_nd_im2col\x18\x11 \x01(\x08:\x05\x66\x61lse\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"0\n\rCropParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x32\x12\x0e\n\x06offset\x18\x02 \x03(\r\"\xa4\x02\n\rDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x31\n\x07\x62\x61\x63kend\x18\x08 \x01(\x0e\x32\x17.caffe.DataParameter.DB:\x07LEVELDB\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\"\n\x13\x66orce_encoded_color\x18\t \x01(\x08:\x05\x66\x61lse\x12\x13\n\x08prefetch\x18\n \x01(\r:\x01\x34\"\x1b\n\x02\x44\x42\x12\x0b\n\x07LEVELDB\x10\x00\x12\x08\n\x04LMDB\x10\x01\".\n\x10\x44ropoutParameter\x12\x1a\n\rdropout_ratio\x18\x01 \x01(\x02:\x03\x30.5\"\xa0\x01\n\x12\x44ummyDataParameter\x12+\n\x0b\x64\x61ta_filler\x18\x01 \x03(\x0b\x32\x16.caffe.FillerParameter\x12\x1f\n\x05shape\x18\x06 \x03(\x0b\x32\x10.caffe.BlobShape\x12\x0b\n\x03num\x18\x02 \x03(\r\x12\x10\n\x08\x63hannels\x18\x03 \x03(\r\x12\x0e\n\x06height\x18\x04 
\x03(\r\x12\r\n\x05width\x18\x05 \x03(\r\"\xa5\x01\n\x10\x45ltwiseParameter\x12\x39\n\toperation\x18\x01 \x01(\x0e\x32!.caffe.EltwiseParameter.EltwiseOp:\x03SUM\x12\r\n\x05\x63oeff\x18\x02 \x03(\x02\x12\x1e\n\x10stable_prod_grad\x18\x03 \x01(\x08:\x04true\"\'\n\tEltwiseOp\x12\x08\n\x04PROD\x10\x00\x12\x07\n\x03SUM\x10\x01\x12\x07\n\x03MAX\x10\x02\" \n\x0c\x45LUParameter\x12\x10\n\x05\x61lpha\x18\x01 \x01(\x02:\x01\x31\"\xac\x01\n\x0e\x45mbedParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x11\n\tinput_dim\x18\x02 \x01(\r\x12\x17\n\tbias_term\x18\x03 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"D\n\x0c\x45xpParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"9\n\x10\x46lattenParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x14\n\x08\x65nd_axis\x18\x02 \x01(\x05:\x02-1\"O\n\x11HDF5DataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x12\n\nbatch_size\x18\x02 \x01(\r\x12\x16\n\x07shuffle\x18\x03 \x01(\x08:\x05\x66\x61lse\"(\n\x13HDF5OutputParameter\x12\x11\n\tfile_name\x18\x01 \x01(\t\"^\n\x12HingeLossParameter\x12\x30\n\x04norm\x18\x01 \x01(\x0e\x32\x1e.caffe.HingeLossParameter.Norm:\x02L1\"\x16\n\x04Norm\x12\x06\n\x02L1\x10\x01\x12\x06\n\x02L2\x10\x02\"\x97\x02\n\x12ImageDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x15\n\nbatch_size\x18\x04 \x01(\r:\x01\x31\x12\x14\n\trand_skip\x18\x07 \x01(\r:\x01\x30\x12\x16\n\x07shuffle\x18\x08 \x01(\x08:\x05\x66\x61lse\x12\x15\n\nnew_height\x18\t \x01(\r:\x01\x30\x12\x14\n\tnew_width\x18\n \x01(\r:\x01\x30\x12\x16\n\x08is_color\x18\x0b \x01(\x08:\x04true\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\x0c \x01(\t:\x00\"\'\n\x15InfogainLossParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\"\xcb\x01\n\x15InnerProductParameter\x12\x12\n\nnum_output\x18\x01 \x01(\r\x12\x17\n\tbias_term\x18\x02 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x04 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0f\n\x04\x61xis\x18\x05 \x01(\x05:\x01\x31\x12\x18\n\ttranspose\x18\x06 \x01(\x08:\x05\x66\x61lse\"1\n\x0eInputParameter\x12\x1f\n\x05shape\x18\x01 \x03(\x0b\x32\x10.caffe.BlobShape\"D\n\x0cLogParameter\x12\x10\n\x04\x62\x61se\x18\x01 \x01(\x02:\x02-1\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"\xb8\x02\n\x0cLRNParameter\x12\x15\n\nlocal_size\x18\x01 \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x02 \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x03 \x01(\x02:\x04\x30.75\x12\x44\n\x0bnorm_region\x18\x04 \x01(\x0e\x32\x1e.caffe.LRNParameter.NormRegion:\x0f\x41\x43ROSS_CHANNELS\x12\x0c\n\x01k\x18\x05 \x01(\x02:\x01\x31\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.LRNParameter.Engine:\x07\x44\x45\x46\x41ULT\"5\n\nNormRegion\x12\x13\n\x0f\x41\x43ROSS_CHANNELS\x10\x00\x12\x12\n\x0eWITHIN_CHANNEL\x10\x01\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x13MemoryDataParameter\x12\x12\n\nbatch_size\x18\x01 \x01(\r\x12\x10\n\x08\x63hannels\x18\x02 \x01(\r\x12\x0e\n\x06height\x18\x03 \x01(\r\x12\r\n\x05width\x18\x04 \x01(\r\"d\n\x0cMVNParameter\x12 \n\x12normalize_variance\x18\x01 
\x01(\x08:\x04true\x12\x1e\n\x0f\x61\x63ross_channels\x18\x02 \x01(\x08:\x05\x66\x61lse\x12\x12\n\x03\x65ps\x18\x03 \x01(\x02:\x05\x31\x65-09\"5\n\x12ParameterParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\"\xa2\x03\n\x10PoolingParameter\x12\x35\n\x04pool\x18\x01 \x01(\x0e\x32\".caffe.PoolingParameter.PoolMethod:\x03MAX\x12\x0e\n\x03pad\x18\x04 \x01(\r:\x01\x30\x12\x10\n\x05pad_h\x18\t \x01(\r:\x01\x30\x12\x10\n\x05pad_w\x18\n \x01(\r:\x01\x30\x12\x13\n\x0bkernel_size\x18\x02 \x01(\r\x12\x10\n\x08kernel_h\x18\x05 \x01(\r\x12\x10\n\x08kernel_w\x18\x06 \x01(\r\x12\x11\n\x06stride\x18\x03 \x01(\r:\x01\x31\x12\x10\n\x08stride_h\x18\x07 \x01(\r\x12\x10\n\x08stride_w\x18\x08 \x01(\r\x12\x37\n\x06\x65ngine\x18\x0b \x01(\x0e\x32\x1e.caffe.PoolingParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x1d\n\x0eglobal_pooling\x18\x0c \x01(\x08:\x05\x66\x61lse\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"F\n\x0ePowerParameter\x12\x10\n\x05power\x18\x01 \x01(\x02:\x01\x31\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x10\n\x05shift\x18\x03 \x01(\x02:\x01\x30\"g\n\x0fPythonParameter\x12\x0e\n\x06module\x18\x01 \x01(\t\x12\r\n\x05layer\x18\x02 \x01(\t\x12\x13\n\tparam_str\x18\x03 \x01(\t:\x00\x12 \n\x11share_in_parallel\x18\x04 \x01(\x08:\x05\x66\x61lse\"\xc0\x01\n\x12RecurrentParameter\x12\x15\n\nnum_output\x18\x01 \x01(\r:\x01\x30\x12-\n\rweight_filler\x18\x02 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x19\n\ndebug_info\x18\x04 \x01(\x08:\x05\x66\x61lse\x12\x1c\n\rexpose_hidden\x18\x05 \x01(\x08:\x05\x66\x61lse\"\xad\x01\n\x12ReductionParameter\x12=\n\toperation\x18\x01 \x01(\x0e\x32%.caffe.ReductionParameter.ReductionOp:\x03SUM\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x10\n\x05\x63oeff\x18\x03 \x01(\x02:\x01\x31\"5\n\x0bReductionOp\x12\x07\n\x03SUM\x10\x01\x12\x08\n\x04\x41SUM\x10\x02\x12\t\n\x05SUMSQ\x10\x03\x12\x08\n\x04MEAN\x10\x04\"\x8d\x01\n\rReLUParameter\x12\x19\n\x0enegative_slope\x18\x01 \x01(\x02:\x01\x30\x12\x34\n\x06\x65ngine\x18\x02 \x01(\x0e\x32\x1b.caffe.ReLUParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"Z\n\x10ReshapeParameter\x12\x1f\n\x05shape\x18\x01 \x01(\x0b\x32\x10.caffe.BlobShape\x12\x0f\n\x04\x61xis\x18\x02 \x01(\x05:\x01\x30\x12\x14\n\x08num_axes\x18\x03 \x01(\x05:\x02-1\"\xa5\x01\n\x0eScaleParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\x13\n\x08num_axes\x18\x02 \x01(\x05:\x01\x31\x12&\n\x06\x66iller\x18\x03 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x18\n\tbias_term\x18\x04 \x01(\x08:\x05\x66\x61lse\x12+\n\x0b\x62ias_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\"x\n\x10SigmoidParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SigmoidParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"L\n\x0eSliceParameter\x12\x0f\n\x04\x61xis\x18\x03 \x01(\x05:\x01\x31\x12\x13\n\x0bslice_point\x18\x02 \x03(\r\x12\x14\n\tslice_dim\x18\x01 \x01(\r:\x01\x31\"\x89\x01\n\x10SoftmaxParameter\x12\x37\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1e.caffe.SoftmaxParameter.Engine:\x07\x44\x45\x46\x41ULT\x12\x0f\n\x04\x61xis\x18\x02 
\x01(\x05:\x01\x31\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"r\n\rTanHParameter\x12\x34\n\x06\x65ngine\x18\x01 \x01(\x0e\x32\x1b.caffe.TanHParameter.Engine:\x07\x44\x45\x46\x41ULT\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"/\n\rTileParameter\x12\x0f\n\x04\x61xis\x18\x01 \x01(\x05:\x01\x31\x12\r\n\x05tiles\x18\x02 \x01(\x05\"*\n\x12ThresholdParameter\x12\x14\n\tthreshold\x18\x01 \x01(\x02:\x01\x30\"\xc1\x02\n\x13WindowDataParameter\x12\x0e\n\x06source\x18\x01 \x01(\t\x12\x10\n\x05scale\x18\x02 \x01(\x02:\x01\x31\x12\x11\n\tmean_file\x18\x03 \x01(\t\x12\x12\n\nbatch_size\x18\x04 \x01(\r\x12\x14\n\tcrop_size\x18\x05 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x06 \x01(\x08:\x05\x66\x61lse\x12\x19\n\x0c\x66g_threshold\x18\x07 \x01(\x02:\x03\x30.5\x12\x19\n\x0c\x62g_threshold\x18\x08 \x01(\x02:\x03\x30.5\x12\x19\n\x0b\x66g_fraction\x18\t \x01(\x02:\x04\x30.25\x12\x16\n\x0b\x63ontext_pad\x18\n \x01(\r:\x01\x30\x12\x17\n\tcrop_mode\x18\x0b \x01(\t:\x04warp\x12\x1b\n\x0c\x63\x61\x63he_images\x18\x0c \x01(\x08:\x05\x66\x61lse\x12\x15\n\x0broot_folder\x18\r \x01(\t:\x00\"\xeb\x01\n\x0cSPPParameter\x12\x16\n\x0epyramid_height\x18\x01 \x01(\r\x12\x31\n\x04pool\x18\x02 \x01(\x0e\x32\x1e.caffe.SPPParameter.PoolMethod:\x03MAX\x12\x33\n\x06\x65ngine\x18\x06 \x01(\x0e\x32\x1a.caffe.SPPParameter.Engine:\x07\x44\x45\x46\x41ULT\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"+\n\x06\x45ngine\x12\x0b\n\x07\x44\x45\x46\x41ULT\x10\x00\x12\t\n\x05\x43\x41\x46\x46\x45\x10\x01\x12\t\n\x05\x43UDNN\x10\x02\"\xe0\x13\n\x10V1LayerParameter\x12\x0e\n\x06\x62ottom\x18\x02 \x03(\t\x12\x0b\n\x03top\x18\x03 \x03(\t\x12\x0c\n\x04name\x18\x04 \x01(\t\x12$\n\x07include\x18 \x03(\x0b\x32\x13.caffe.NetStateRule\x12$\n\x07\x65xclude\x18! 
\x03(\x0b\x32\x13.caffe.NetStateRule\x12/\n\x04type\x18\x05 \x01(\x0e\x32!.caffe.V1LayerParameter.LayerType\x12\x1f\n\x05\x62lobs\x18\x06 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x0e\n\x05param\x18\xe9\x07 \x03(\t\x12>\n\x0f\x62lob_share_mode\x18\xea\x07 \x03(\x0e\x32$.caffe.V1LayerParameter.DimCheckMode\x12\x10\n\x08\x62lobs_lr\x18\x07 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x08 \x03(\x02\x12\x13\n\x0bloss_weight\x18# \x03(\x02\x12\x30\n\x0e\x61\x63\x63uracy_param\x18\x1b \x01(\x0b\x32\x18.caffe.AccuracyParameter\x12,\n\x0c\x61rgmax_param\x18\x17 \x01(\x0b\x32\x16.caffe.ArgMaxParameter\x12,\n\x0c\x63oncat_param\x18\t \x01(\x0b\x32\x16.caffe.ConcatParameter\x12?\n\x16\x63ontrastive_loss_param\x18( \x01(\x0b\x32\x1f.caffe.ContrastiveLossParameter\x12\x36\n\x11\x63onvolution_param\x18\n \x01(\x0b\x32\x1b.caffe.ConvolutionParameter\x12(\n\ndata_param\x18\x0b \x01(\x0b\x32\x14.caffe.DataParameter\x12.\n\rdropout_param\x18\x0c \x01(\x0b\x32\x17.caffe.DropoutParameter\x12\x33\n\x10\x64ummy_data_param\x18\x1a \x01(\x0b\x32\x19.caffe.DummyDataParameter\x12.\n\reltwise_param\x18\x18 \x01(\x0b\x32\x17.caffe.EltwiseParameter\x12&\n\texp_param\x18) \x01(\x0b\x32\x13.caffe.ExpParameter\x12\x31\n\x0fhdf5_data_param\x18\r \x01(\x0b\x32\x18.caffe.HDF5DataParameter\x12\x35\n\x11hdf5_output_param\x18\x0e \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\x12\x33\n\x10hinge_loss_param\x18\x1d \x01(\x0b\x32\x19.caffe.HingeLossParameter\x12\x33\n\x10image_data_param\x18\x0f \x01(\x0b\x32\x19.caffe.ImageDataParameter\x12\x39\n\x13infogain_loss_param\x18\x10 \x01(\x0b\x32\x1c.caffe.InfogainLossParameter\x12\x39\n\x13inner_product_param\x18\x11 \x01(\x0b\x32\x1c.caffe.InnerProductParameter\x12&\n\tlrn_param\x18\x12 \x01(\x0b\x32\x13.caffe.LRNParameter\x12\x35\n\x11memory_data_param\x18\x16 \x01(\x0b\x32\x1a.caffe.MemoryDataParameter\x12&\n\tmvn_param\x18\" \x01(\x0b\x32\x13.caffe.MVNParameter\x12.\n\rpooling_param\x18\x13 \x01(\x0b\x32\x17.caffe.PoolingParameter\x12*\n\x0bpower_param\x18\x15 \x01(\x0b\x32\x15.caffe.PowerParameter\x12(\n\nrelu_param\x18\x1e \x01(\x0b\x32\x14.caffe.ReLUParameter\x12.\n\rsigmoid_param\x18& \x01(\x0b\x32\x17.caffe.SigmoidParameter\x12.\n\rsoftmax_param\x18\' \x01(\x0b\x32\x17.caffe.SoftmaxParameter\x12*\n\x0bslice_param\x18\x1f \x01(\x0b\x32\x15.caffe.SliceParameter\x12(\n\ntanh_param\x18% \x01(\x0b\x32\x14.caffe.TanHParameter\x12\x32\n\x0fthreshold_param\x18\x19 \x01(\x0b\x32\x19.caffe.ThresholdParameter\x12\x35\n\x11window_data_param\x18\x14 \x01(\x0b\x32\x1a.caffe.WindowDataParameter\x12\x37\n\x0ftransform_param\x18$ \x01(\x0b\x32\x1e.caffe.TransformationParameter\x12(\n\nloss_param\x18* \x01(\x0b\x32\x14.caffe.LossParameter\x12&\n\x05layer\x18\x01 \x01(\x0b\x32\x17.caffe.V0LayerParameter\"\xd8\x04\n\tLayerType\x12\x08\n\x04NONE\x10\x00\x12\n\n\x06\x41\x42SVAL\x10#\x12\x0c\n\x08\x41\x43\x43URACY\x10\x01\x12\n\n\x06\x41RGMAX\x10\x1e\x12\x08\n\x04\x42NLL\x10\x02\x12\n\n\x06\x43ONCAT\x10\x03\x12\x14\n\x10\x43ONTRASTIVE_LOSS\x10%\x12\x0f\n\x0b\x43ONVOLUTION\x10\x04\x12\x08\n\x04\x44\x41TA\x10\x05\x12\x11\n\rDECONVOLUTION\x10\'\x12\x0b\n\x07\x44ROPOUT\x10\x06\x12\x0e\n\nDUMMY_DATA\x10 
\x12\x12\n\x0e\x45UCLIDEAN_LOSS\x10\x07\x12\x0b\n\x07\x45LTWISE\x10\x19\x12\x07\n\x03\x45XP\x10&\x12\x0b\n\x07\x46LATTEN\x10\x08\x12\r\n\tHDF5_DATA\x10\t\x12\x0f\n\x0bHDF5_OUTPUT\x10\n\x12\x0e\n\nHINGE_LOSS\x10\x1c\x12\n\n\x06IM2COL\x10\x0b\x12\x0e\n\nIMAGE_DATA\x10\x0c\x12\x11\n\rINFOGAIN_LOSS\x10\r\x12\x11\n\rINNER_PRODUCT\x10\x0e\x12\x07\n\x03LRN\x10\x0f\x12\x0f\n\x0bMEMORY_DATA\x10\x1d\x12\x1d\n\x19MULTINOMIAL_LOGISTIC_LOSS\x10\x10\x12\x07\n\x03MVN\x10\"\x12\x0b\n\x07POOLING\x10\x11\x12\t\n\x05POWER\x10\x1a\x12\x08\n\x04RELU\x10\x12\x12\x0b\n\x07SIGMOID\x10\x13\x12\x1e\n\x1aSIGMOID_CROSS_ENTROPY_LOSS\x10\x1b\x12\x0b\n\x07SILENCE\x10$\x12\x0b\n\x07SOFTMAX\x10\x14\x12\x10\n\x0cSOFTMAX_LOSS\x10\x15\x12\t\n\x05SPLIT\x10\x16\x12\t\n\x05SLICE\x10!\x12\x08\n\x04TANH\x10\x17\x12\x0f\n\x0bWINDOW_DATA\x10\x18\x12\r\n\tTHRESHOLD\x10\x1f\"*\n\x0c\x44imCheckMode\x12\n\n\x06STRICT\x10\x00\x12\x0e\n\nPERMISSIVE\x10\x01\"\xfd\x07\n\x10V0LayerParameter\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\x12\x12\n\nnum_output\x18\x03 \x01(\r\x12\x16\n\x08\x62iasterm\x18\x04 \x01(\x08:\x04true\x12-\n\rweight_filler\x18\x05 \x01(\x0b\x32\x16.caffe.FillerParameter\x12+\n\x0b\x62ias_filler\x18\x06 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x0e\n\x03pad\x18\x07 \x01(\r:\x01\x30\x12\x12\n\nkernelsize\x18\x08 \x01(\r\x12\x10\n\x05group\x18\t \x01(\r:\x01\x31\x12\x11\n\x06stride\x18\n \x01(\r:\x01\x31\x12\x35\n\x04pool\x18\x0b \x01(\x0e\x32\".caffe.V0LayerParameter.PoolMethod:\x03MAX\x12\x1a\n\rdropout_ratio\x18\x0c \x01(\x02:\x03\x30.5\x12\x15\n\nlocal_size\x18\r \x01(\r:\x01\x35\x12\x10\n\x05\x61lpha\x18\x0e \x01(\x02:\x01\x31\x12\x12\n\x04\x62\x65ta\x18\x0f \x01(\x02:\x04\x30.75\x12\x0c\n\x01k\x18\x16 \x01(\x02:\x01\x31\x12\x0e\n\x06source\x18\x10 \x01(\t\x12\x10\n\x05scale\x18\x11 \x01(\x02:\x01\x31\x12\x10\n\x08meanfile\x18\x12 \x01(\t\x12\x11\n\tbatchsize\x18\x13 \x01(\r\x12\x13\n\x08\x63ropsize\x18\x14 \x01(\r:\x01\x30\x12\x15\n\x06mirror\x18\x15 \x01(\x08:\x05\x66\x61lse\x12\x1f\n\x05\x62lobs\x18\x32 \x03(\x0b\x32\x10.caffe.BlobProto\x12\x10\n\x08\x62lobs_lr\x18\x33 \x03(\x02\x12\x14\n\x0cweight_decay\x18\x34 \x03(\x02\x12\x14\n\trand_skip\x18\x35 \x01(\r:\x01\x30\x12\x1d\n\x10\x64\x65t_fg_threshold\x18\x36 \x01(\x02:\x03\x30.5\x12\x1d\n\x10\x64\x65t_bg_threshold\x18\x37 \x01(\x02:\x03\x30.5\x12\x1d\n\x0f\x64\x65t_fg_fraction\x18\x38 \x01(\x02:\x04\x30.25\x12\x1a\n\x0f\x64\x65t_context_pad\x18: \x01(\r:\x01\x30\x12\x1b\n\rdet_crop_mode\x18; \x01(\t:\x04warp\x12\x12\n\x07new_num\x18< \x01(\x05:\x01\x30\x12\x17\n\x0cnew_channels\x18= \x01(\x05:\x01\x30\x12\x15\n\nnew_height\x18> \x01(\x05:\x01\x30\x12\x14\n\tnew_width\x18? 
\x01(\x05:\x01\x30\x12\x1d\n\x0eshuffle_images\x18@ \x01(\x08:\x05\x66\x61lse\x12\x15\n\nconcat_dim\x18\x41 \x01(\r:\x01\x31\x12\x36\n\x11hdf5_output_param\x18\xe9\x07 \x01(\x0b\x32\x1a.caffe.HDF5OutputParameter\".\n\nPoolMethod\x12\x07\n\x03MAX\x10\x00\x12\x07\n\x03\x41VE\x10\x01\x12\x0e\n\nSTOCHASTIC\x10\x02\"W\n\x0ePReLUParameter\x12&\n\x06\x66iller\x18\x01 \x01(\x0b\x32\x16.caffe.FillerParameter\x12\x1d\n\x0e\x63hannel_shared\x18\x02 \x01(\x08:\x05\x66\x61lse*\x1c\n\x05Phase\x12\t\n\x05TRAIN\x10\x00\x12\x08\n\x04TEST\x10\x01') +) +_sym_db.RegisterFileDescriptor(DESCRIPTOR) _PHASE = _descriptor.EnumDescriptor( name='Phase', @@ -33,9 +41,10 @@ ], containing_type=None, options=None, - serialized_start=14991, - serialized_end=15019, + serialized_start=15386, + serialized_end=15414, ) +_sym_db.RegisterEnumDescriptor(_PHASE) Phase = enum_type_wrapper.EnumTypeWrapper(_PHASE) TRAIN = 0 @@ -66,6 +75,7 @@ serialized_start=658, serialized_end=710, ) +_sym_db.RegisterEnumDescriptor(_FILLERPARAMETER_VARIANCENORM) _SOLVERPARAMETER_SNAPSHOTFORMAT = _descriptor.EnumDescriptor( name='SnapshotFormat', @@ -84,9 +94,10 @@ ], containing_type=None, options=None, - serialized_start=2132, - serialized_end=2175, + serialized_start=2171, + serialized_end=2214, ) +_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SNAPSHOTFORMAT) _SOLVERPARAMETER_SOLVERMODE = _descriptor.EnumDescriptor( name='SolverMode', @@ -105,9 +116,10 @@ ], containing_type=None, options=None, - serialized_start=2177, - serialized_end=2207, + serialized_start=2216, + serialized_end=2246, ) +_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERMODE) _SOLVERPARAMETER_SOLVERTYPE = _descriptor.EnumDescriptor( name='SolverType', @@ -142,9 +154,10 @@ ], containing_type=None, options=None, - serialized_start=2209, - serialized_end=2294, + serialized_start=2248, + serialized_end=2333, ) +_sym_db.RegisterEnumDescriptor(_SOLVERPARAMETER_SOLVERTYPE) _PARAMSPEC_DIMCHECKMODE = _descriptor.EnumDescriptor( name='DimCheckMode', @@ -163,9 +176,10 @@ ], containing_type=None, options=None, - serialized_start=2725, - serialized_end=2767, + serialized_start=2764, + serialized_end=2806, ) +_sym_db.RegisterEnumDescriptor(_PARAMSPEC_DIMCHECKMODE) _LOSSPARAMETER_NORMALIZATIONMODE = _descriptor.EnumDescriptor( name='NormalizationMode', @@ -192,9 +206,10 @@ ], containing_type=None, options=None, - serialized_start=5542, - serialized_end=5608, + serialized_start=5687, + serialized_end=5753, ) +_sym_db.RegisterEnumDescriptor(_LOSSPARAMETER_NORMALIZATIONMODE) _CONVOLUTIONPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -217,9 +232,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_CONVOLUTIONPARAMETER_ENGINE) _DATAPARAMETER_DB = _descriptor.EnumDescriptor( name='DB', @@ -238,9 +254,10 @@ ], containing_type=None, options=None, - serialized_start=6934, - serialized_end=6961, + serialized_start=7079, + serialized_end=7106, ) +_sym_db.RegisterEnumDescriptor(_DATAPARAMETER_DB) _ELTWISEPARAMETER_ELTWISEOP = _descriptor.EnumDescriptor( name='EltwiseOp', @@ -263,9 +280,10 @@ ], containing_type=None, options=None, - serialized_start=7301, - serialized_end=7340, + serialized_start=7446, + serialized_end=7485, ) +_sym_db.RegisterEnumDescriptor(_ELTWISEPARAMETER_ELTWISEOP) _HINGELOSSPARAMETER_NORM = _descriptor.EnumDescriptor( name='Norm', @@ -284,9 +302,10 @@ ], containing_type=None, options=None, - serialized_start=7875, - serialized_end=7897, 
+ serialized_start=8020, + serialized_end=8042, ) +_sym_db.RegisterEnumDescriptor(_HINGELOSSPARAMETER_NORM) _LRNPARAMETER_NORMREGION = _descriptor.EnumDescriptor( name='NormRegion', @@ -305,9 +324,10 @@ ], containing_type=None, options=None, - serialized_start=8764, - serialized_end=8817, + serialized_start=8909, + serialized_end=8962, ) +_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_NORMREGION) _LRNPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -330,9 +350,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_LRNPARAMETER_ENGINE) _POOLINGPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', @@ -355,9 +376,10 @@ ], containing_type=None, options=None, - serialized_start=9386, - serialized_end=9432, + serialized_start=9586, + serialized_end=9632, ) +_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_POOLMETHOD) _POOLINGPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -380,9 +402,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_POOLINGPARAMETER_ENGINE) _REDUCTIONPARAMETER_REDUCTIONOP = _descriptor.EnumDescriptor( name='ReductionOp', @@ -409,9 +432,10 @@ ], containing_type=None, options=None, - serialized_start=9777, - serialized_end=9830, + serialized_start=10172, + serialized_end=10225, ) +_sym_db.RegisterEnumDescriptor(_REDUCTIONPARAMETER_REDUCTIONOP) _RELUPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -434,9 +458,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_RELUPARAMETER_ENGINE) _SIGMOIDPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -459,9 +484,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_SIGMOIDPARAMETER_ENGINE) _SOFTMAXPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -484,9 +510,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_SOFTMAXPARAMETER_ENGINE) _TANHPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -509,9 +536,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_TANHPARAMETER_ENGINE) _SPPPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', @@ -534,9 +562,10 @@ ], containing_type=None, options=None, - serialized_start=9386, - serialized_end=9432, + serialized_start=9586, + serialized_end=9632, ) +_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_POOLMETHOD) _SPPPARAMETER_ENGINE = _descriptor.EnumDescriptor( name='Engine', @@ -559,9 +588,10 @@ ], containing_type=None, options=None, - serialized_start=6573, - serialized_end=6616, + serialized_start=6718, + serialized_end=6761, ) +_sym_db.RegisterEnumDescriptor(_SPPPARAMETER_ENGINE) _V1LAYERPARAMETER_LAYERTYPE = _descriptor.EnumDescriptor( name='LayerType', @@ -732,9 +762,10 @@ ], containing_type=None, options=None, - serialized_start=13232, - serialized_end=13832, + serialized_start=13627, + serialized_end=14227, ) 
+_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_LAYERTYPE) _V1LAYERPARAMETER_DIMCHECKMODE = _descriptor.EnumDescriptor( name='DimCheckMode', @@ -753,9 +784,10 @@ ], containing_type=None, options=None, - serialized_start=2725, - serialized_end=2767, + serialized_start=2764, + serialized_end=2806, ) +_sym_db.RegisterEnumDescriptor(_V1LAYERPARAMETER_DIMCHECKMODE) _V0LAYERPARAMETER_POOLMETHOD = _descriptor.EnumDescriptor( name='PoolMethod', @@ -778,9 +810,10 @@ ], containing_type=None, options=None, - serialized_start=9386, - serialized_end=9432, + serialized_start=9586, + serialized_end=9632, ) +_sym_db.RegisterEnumDescriptor(_V0LAYERPARAMETER_POOLMETHOD) _BLOBSHAPE = _descriptor.Descriptor( @@ -796,7 +829,7 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')), + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), ], extensions=[ ], @@ -805,7 +838,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=22, serialized_end=50, ) @@ -831,28 +867,28 @@ has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')), + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), _descriptor.FieldDescriptor( name='diff', full_name='caffe.BlobProto.diff', index=2, number=6, type=2, cpp_type=6, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')), + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), _descriptor.FieldDescriptor( name='double_data', full_name='caffe.BlobProto.double_data', index=3, number=8, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')), + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), _descriptor.FieldDescriptor( name='double_diff', full_name='caffe.BlobProto.double_diff', index=4, number=9, type=1, cpp_type=5, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, - options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001')), + options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))), _descriptor.FieldDescriptor( name='num', full_name='caffe.BlobProto.num', index=5, number=1, type=5, cpp_type=1, label=1, @@ -889,7 +925,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=53, serialized_end=257, ) @@ -917,7 +956,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=259, serialized_end=309, ) @@ -954,7 +996,7 @@ _descriptor.FieldDescriptor( name='data', full_name='caffe.Datum.data', index=3, number=4, type=12, cpp_type=9, label=1, - has_default_value=False, default_value="", + has_default_value=False, default_value=_b(""), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -987,7 +1029,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=312, serialized_end=441, ) @@ -1003,42 +1048,42 @@ _descriptor.FieldDescriptor( name='type', full_name='caffe.FillerParameter.type', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("constant", "utf-8"), + has_default_value=True, default_value=_b("constant").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='value', full_name='caffe.FillerParameter.value', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='min', full_name='caffe.FillerParameter.min', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='max', full_name='caffe.FillerParameter.max', index=3, number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean', full_name='caffe.FillerParameter.mean', index=4, number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='std', full_name='caffe.FillerParameter.std', index=5, number=6, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1065,7 +1110,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=444, serialized_end=710, ) @@ -1081,7 +1129,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.NetParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1149,7 +1197,10 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=713, serialized_end=983, ) @@ -1165,7 +1216,7 @@ _descriptor.FieldDescriptor( name='net', full_name='caffe.SolverParameter.net', index=0, number=24, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1179,7 +1230,7 @@ _descriptor.FieldDescriptor( name='train_net', full_name='caffe.SolverParameter.train_net', 
index=2, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1249,7 +1300,7 @@ _descriptor.FieldDescriptor( name='base_lr', full_name='caffe.SolverParameter.base_lr', index=12, number=5, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1284,42 +1335,42 @@ _descriptor.FieldDescriptor( name='lr_policy', full_name='caffe.SolverParameter.lr_policy', index=17, number=8, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='gamma', full_name='caffe.SolverParameter.gamma', index=18, number=9, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='power', full_name='caffe.SolverParameter.power', index=19, number=10, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='momentum', full_name='caffe.SolverParameter.momentum', index=20, number=11, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='weight_decay', full_name='caffe.SolverParameter.weight_decay', index=21, number=12, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='regularization_type', full_name='caffe.SolverParameter.regularization_type', index=22, number=29, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("L2", "utf-8"), + has_default_value=True, default_value=_b("L2").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1340,7 +1391,7 @@ _descriptor.FieldDescriptor( name='clip_gradients', full_name='caffe.SolverParameter.clip_gradients', index=25, number=35, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, + has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1354,7 +1405,7 @@ _descriptor.FieldDescriptor( name='snapshot_prefix', full_name='caffe.SolverParameter.snapshot_prefix', index=27, number=15, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, 
enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1396,28 +1447,28 @@ _descriptor.FieldDescriptor( name='type', full_name='caffe.SolverParameter.type', index=33, number=40, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("SGD", "utf-8"), + has_default_value=True, default_value=_b("SGD").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='delta', full_name='caffe.SolverParameter.delta', index=34, number=31, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-08, + has_default_value=True, default_value=float(1e-08), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='momentum2', full_name='caffe.SolverParameter.momentum2', index=35, number=39, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.999, + has_default_value=True, default_value=float(0.999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='rms_decay', full_name='caffe.SolverParameter.rms_decay', index=36, number=38, type=2, cpp_type=6, label=1, - has_default_value=False, default_value=0, + has_default_value=True, default_value=float(0.99), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1442,6 +1493,13 @@ message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), + _descriptor.FieldDescriptor( + name='layer_wise_reduce', full_name='caffe.SolverParameter.layer_wise_reduce', index=40, + number=41, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=True, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), ], extensions=[ ], @@ -1453,9 +1511,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], + oneofs=[ + ], serialized_start=986, - serialized_end=2294, + serialized_end=2333, ) @@ -1476,7 +1537,7 @@ _descriptor.FieldDescriptor( name='learned_net', full_name='caffe.SolverState.learned_net', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1502,9 +1563,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=2296, - serialized_end=2404, + oneofs=[ + ], + serialized_start=2335, + serialized_end=2443, ) @@ -1544,9 +1608,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=2406, - serialized_end=2484, + oneofs=[ + ], + serialized_start=2445, + serialized_end=2523, ) @@ -1600,9 +1667,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=2486, - serialized_end=2601, + oneofs=[ + ], + serialized_start=2525, + serialized_end=2640, ) @@ -1616,7 +1686,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.ParamSpec.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1630,14 +1700,14 @@ _descriptor.FieldDescriptor( name='lr_mult', full_name='caffe.ParamSpec.lr_mult', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='decay_mult', full_name='caffe.ParamSpec.decay_mult', index=3, number=4, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1650,9 +1720,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=2604, - serialized_end=2767, + oneofs=[ + ], + serialized_start=2643, + serialized_end=2806, ) @@ -1666,14 +1739,14 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='caffe.LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -1944,112 +2017,126 @@ is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=40, + name='parameter_param', full_name='caffe.LayerParameter.parameter_param', index=40, + number=145, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='pooling_param', full_name='caffe.LayerParameter.pooling_param', index=41, number=121, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='power_param', full_name='caffe.LayerParameter.power_param', index=41, + name='power_param', full_name='caffe.LayerParameter.power_param', index=42, number=122, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=42, + name='prelu_param', full_name='caffe.LayerParameter.prelu_param', index=43, number=131, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='python_param', full_name='caffe.LayerParameter.python_param', index=43, + name='python_param', full_name='caffe.LayerParameter.python_param', 
index=44, number=130, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=44, + name='recurrent_param', full_name='caffe.LayerParameter.recurrent_param', index=45, + number=146, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='reduction_param', full_name='caffe.LayerParameter.reduction_param', index=46, number=136, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='relu_param', full_name='caffe.LayerParameter.relu_param', index=45, + name='relu_param', full_name='caffe.LayerParameter.relu_param', index=47, number=123, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=46, + name='reshape_param', full_name='caffe.LayerParameter.reshape_param', index=48, number=133, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='scale_param', full_name='caffe.LayerParameter.scale_param', index=47, + name='scale_param', full_name='caffe.LayerParameter.scale_param', index=49, number=142, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=48, + name='sigmoid_param', full_name='caffe.LayerParameter.sigmoid_param', index=50, number=124, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=49, + name='softmax_param', full_name='caffe.LayerParameter.softmax_param', index=51, number=125, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='spp_param', full_name='caffe.LayerParameter.spp_param', index=50, + name='spp_param', full_name='caffe.LayerParameter.spp_param', index=52, number=132, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='slice_param', full_name='caffe.LayerParameter.slice_param', index=51, + name='slice_param', full_name='caffe.LayerParameter.slice_param', index=53, number=126, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, 
message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=52, + name='tanh_param', full_name='caffe.LayerParameter.tanh_param', index=54, number=127, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=53, + name='threshold_param', full_name='caffe.LayerParameter.threshold_param', index=55, number=128, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='tile_param', full_name='caffe.LayerParameter.tile_param', index=54, + name='tile_param', full_name='caffe.LayerParameter.tile_param', index=56, number=138, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( - name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=55, + name='window_data_param', full_name='caffe.LayerParameter.window_data_param', index=57, number=129, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, @@ -2063,9 +2150,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=2770, - serialized_end=5226, + oneofs=[ + ], + serialized_start=2809, + serialized_end=5371, ) @@ -2079,7 +2169,7 @@ _descriptor.FieldDescriptor( name='scale', full_name='caffe.TransformationParameter.scale', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2100,7 +2190,7 @@ _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.TransformationParameter.mean_file', index=3, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2133,9 +2223,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5229, - serialized_end=5411, + oneofs=[ + ], + serialized_start=5374, + serialized_end=5556, ) @@ -2176,9 +2269,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5414, - serialized_end=5608, + oneofs=[ + ], + serialized_start=5559, + serialized_end=5753, ) @@ -2218,9 +2314,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5610, - serialized_end=5686, + oneofs=[ + ], + serialized_start=5755, + serialized_end=5831, ) @@ -2260,9 +2359,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5688, - serialized_end=5765, + oneofs=[ + ], + serialized_start=5833, + serialized_end=5910, ) @@ -2295,9 +2397,12 @@ ], options=None, is_extendable=False, + 
syntax='proto2', extension_ranges=[], - serialized_start=5767, - serialized_end=5824, + oneofs=[ + ], + serialized_start=5912, + serialized_end=5969, ) @@ -2318,14 +2423,14 @@ _descriptor.FieldDescriptor( name='moving_average_fraction', full_name='caffe.BatchNormParameter.moving_average_fraction', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.999, + has_default_value=True, default_value=float(0.999), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='eps', full_name='caffe.BatchNormParameter.eps', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-05, + has_default_value=True, default_value=float(1e-05), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2337,9 +2442,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5826, - serialized_end=5932, + oneofs=[ + ], + serialized_start=5971, + serialized_end=6077, ) @@ -2379,9 +2487,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=5934, - serialized_end=6027, + oneofs=[ + ], + serialized_start=6079, + serialized_end=6172, ) @@ -2395,7 +2506,7 @@ _descriptor.FieldDescriptor( name='margin', full_name='caffe.ContrastiveLossParameter.margin', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2414,9 +2525,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=6029, - serialized_end=6105, + oneofs=[ + ], + serialized_start=6174, + serialized_end=6250, ) @@ -2562,9 +2676,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=6108, - serialized_end=6616, + oneofs=[ + ], + serialized_start=6253, + serialized_end=6761, ) @@ -2597,9 +2714,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=6618, - serialized_end=6666, + oneofs=[ + ], + serialized_start=6763, + serialized_end=6811, ) @@ -2613,7 +2733,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.DataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2641,14 +2761,14 @@ _descriptor.FieldDescriptor( name='scale', full_name='caffe.DataParameter.scale', index=4, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.DataParameter.mean_file', index=5, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2689,9 +2809,12 @@ ], options=None, is_extendable=False, + 
syntax='proto2', extension_ranges=[], - serialized_start=6669, - serialized_end=6961, + oneofs=[ + ], + serialized_start=6814, + serialized_end=7106, ) @@ -2705,7 +2828,7 @@ _descriptor.FieldDescriptor( name='dropout_ratio', full_name='caffe.DropoutParameter.dropout_ratio', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2717,9 +2840,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=6963, - serialized_end=7009, + oneofs=[ + ], + serialized_start=7108, + serialized_end=7154, ) @@ -2780,9 +2906,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7012, - serialized_end=7172, + oneofs=[ + ], + serialized_start=7157, + serialized_end=7317, ) @@ -2823,9 +2952,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7175, - serialized_end=7340, + oneofs=[ + ], + serialized_start=7320, + serialized_end=7485, ) @@ -2839,7 +2971,7 @@ _descriptor.FieldDescriptor( name='alpha', full_name='caffe.ELUParameter.alpha', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2851,9 +2983,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7342, - serialized_end=7374, + oneofs=[ + ], + serialized_start=7487, + serialized_end=7519, ) @@ -2907,9 +3042,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7377, - serialized_end=7549, + oneofs=[ + ], + serialized_start=7522, + serialized_end=7694, ) @@ -2923,21 +3061,21 @@ _descriptor.FieldDescriptor( name='base', full_name='caffe.ExpParameter.base', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, + has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='scale', full_name='caffe.ExpParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='shift', full_name='caffe.ExpParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -2949,9 +3087,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7551, - serialized_end=7619, + oneofs=[ + ], + serialized_start=7696, + serialized_end=7764, ) @@ -2984,9 +3125,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7621, - serialized_end=7678, + oneofs=[ + ], + serialized_start=7766, + serialized_end=7823, ) @@ -3000,7 +3144,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.HDF5DataParameter.source', index=0, 
number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3026,9 +3170,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7680, - serialized_end=7759, + oneofs=[ + ], + serialized_start=7825, + serialized_end=7904, ) @@ -3042,7 +3189,7 @@ _descriptor.FieldDescriptor( name='file_name', full_name='caffe.HDF5OutputParameter.file_name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3054,9 +3201,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7761, - serialized_end=7801, + oneofs=[ + ], + serialized_start=7906, + serialized_end=7946, ) @@ -3083,9 +3233,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7803, - serialized_end=7897, + oneofs=[ + ], + serialized_start=7948, + serialized_end=8042, ) @@ -3099,7 +3252,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.ImageDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3148,14 +3301,14 @@ _descriptor.FieldDescriptor( name='scale', full_name='caffe.ImageDataParameter.scale', index=7, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.ImageDataParameter.mean_file', index=8, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3176,7 +3329,7 @@ _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.ImageDataParameter.root_folder', index=11, number=12, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("", "utf-8"), + has_default_value=True, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3188,9 +3341,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=7900, - serialized_end=8179, + oneofs=[ + ], + serialized_start=8045, + serialized_end=8324, ) @@ -3204,7 +3360,7 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.InfogainLossParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3216,9 +3372,12 @@ ], options=None, is_extendable=False, + 
syntax='proto2', extension_ranges=[], - serialized_start=8181, - serialized_end=8220, + oneofs=[ + ], + serialized_start=8326, + serialized_end=8365, ) @@ -3279,9 +3438,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8223, - serialized_end=8426, + oneofs=[ + ], + serialized_start=8368, + serialized_end=8571, ) @@ -3307,9 +3469,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8428, - serialized_end=8477, + oneofs=[ + ], + serialized_start=8573, + serialized_end=8622, ) @@ -3323,21 +3488,21 @@ _descriptor.FieldDescriptor( name='base', full_name='caffe.LogParameter.base', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=-1, + has_default_value=True, default_value=float(-1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='scale', full_name='caffe.LogParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='shift', full_name='caffe.LogParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3349,9 +3514,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8479, - serialized_end=8547, + oneofs=[ + ], + serialized_start=8624, + serialized_end=8692, ) @@ -3372,14 +3540,14 @@ _descriptor.FieldDescriptor( name='alpha', full_name='caffe.LRNParameter.alpha', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='beta', full_name='caffe.LRNParameter.beta', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.75, + has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3393,7 +3561,7 @@ _descriptor.FieldDescriptor( name='k', full_name='caffe.LRNParameter.k', index=4, number=5, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3414,9 +3582,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8550, - serialized_end=8862, + oneofs=[ + ], + serialized_start=8695, + serialized_end=9007, ) @@ -3463,9 +3634,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8864, - serialized_end=8954, + oneofs=[ + ], + serialized_start=9009, + serialized_end=9099, ) @@ -3493,7 +3667,7 @@ _descriptor.FieldDescriptor( name='eps', full_name='caffe.MVNParameter.eps', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1e-09, + has_default_value=True, 
default_value=float(1e-09), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3505,9 +3679,43 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=8956, - serialized_end=9056, + oneofs=[ + ], + serialized_start=9101, + serialized_end=9201, +) + + +_PARAMETERPARAMETER = _descriptor.Descriptor( + name='ParameterParameter', + full_name='caffe.ParameterParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='shape', full_name='caffe.ParameterParameter.shape', index=0, + number=1, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=9203, + serialized_end=9256, ) @@ -3612,9 +3820,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9059, - serialized_end=9477, + oneofs=[ + ], + serialized_start=9259, + serialized_end=9677, ) @@ -3628,21 +3839,21 @@ _descriptor.FieldDescriptor( name='power', full_name='caffe.PowerParameter.power', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='scale', full_name='caffe.PowerParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='shift', full_name='caffe.PowerParameter.shift', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3654,9 +3865,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9479, - serialized_end=9549, + oneofs=[ + ], + serialized_start=9679, + serialized_end=9749, ) @@ -3670,21 +3884,21 @@ _descriptor.FieldDescriptor( name='module', full_name='caffe.PythonParameter.module', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='layer', full_name='caffe.PythonParameter.layer', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='param_str', full_name='caffe.PythonParameter.param_str', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("", "utf-8"), + has_default_value=True, 
default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3703,9 +3917,71 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9551, - serialized_end=9654, + oneofs=[ + ], + serialized_start=9751, + serialized_end=9854, +) + + +_RECURRENTPARAMETER = _descriptor.Descriptor( + name='RecurrentParameter', + full_name='caffe.RecurrentParameter', + filename=None, + file=DESCRIPTOR, + containing_type=None, + fields=[ + _descriptor.FieldDescriptor( + name='num_output', full_name='caffe.RecurrentParameter.num_output', index=0, + number=1, type=13, cpp_type=3, label=1, + has_default_value=True, default_value=0, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='weight_filler', full_name='caffe.RecurrentParameter.weight_filler', index=1, + number=2, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='bias_filler', full_name='caffe.RecurrentParameter.bias_filler', index=2, + number=3, type=11, cpp_type=10, label=1, + has_default_value=False, default_value=None, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='debug_info', full_name='caffe.RecurrentParameter.debug_info', index=3, + number=4, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + _descriptor.FieldDescriptor( + name='expose_hidden', full_name='caffe.RecurrentParameter.expose_hidden', index=4, + number=5, type=8, cpp_type=7, label=1, + has_default_value=True, default_value=False, + message_type=None, enum_type=None, containing_type=None, + is_extension=False, extension_scope=None, + options=None), + ], + extensions=[ + ], + nested_types=[], + enum_types=[ + ], + options=None, + is_extendable=False, + syntax='proto2', + extension_ranges=[], + oneofs=[ + ], + serialized_start=9857, + serialized_end=10049, ) @@ -3733,7 +4009,7 @@ _descriptor.FieldDescriptor( name='coeff', full_name='caffe.ReductionParameter.coeff', index=2, number=3, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3746,9 +4022,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9657, - serialized_end=9830, + oneofs=[ + ], + serialized_start=10052, + serialized_end=10225, ) @@ -3762,7 +4041,7 @@ _descriptor.FieldDescriptor( name='negative_slope', full_name='caffe.ReLUParameter.negative_slope', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -3782,9 +4061,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9833, - serialized_end=9974, + oneofs=[ + ], + serialized_start=10228, + serialized_end=10369, ) @@ 
-3824,9 +4106,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=9976, - serialized_end=10066, + oneofs=[ + ], + serialized_start=10371, + serialized_end=10461, ) @@ -3880,9 +4165,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10069, - serialized_end=10234, + oneofs=[ + ], + serialized_start=10464, + serialized_end=10629, ) @@ -3909,9 +4197,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10236, - serialized_end=10356, + oneofs=[ + ], + serialized_start=10631, + serialized_end=10751, ) @@ -3951,9 +4242,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10358, - serialized_end=10434, + oneofs=[ + ], + serialized_start=10753, + serialized_end=10829, ) @@ -3987,9 +4281,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10437, - serialized_end=10574, + oneofs=[ + ], + serialized_start=10832, + serialized_end=10969, ) @@ -4016,9 +4313,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10576, - serialized_end=10690, + oneofs=[ + ], + serialized_start=10971, + serialized_end=11085, ) @@ -4051,9 +4351,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10692, - serialized_end=10739, + oneofs=[ + ], + serialized_start=11087, + serialized_end=11134, ) @@ -4067,7 +4370,7 @@ _descriptor.FieldDescriptor( name='threshold', full_name='caffe.ThresholdParameter.threshold', index=0, number=1, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0, + has_default_value=True, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4079,9 +4382,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10741, - serialized_end=10783, + oneofs=[ + ], + serialized_start=11136, + serialized_end=11178, ) @@ -4095,21 +4401,21 @@ _descriptor.FieldDescriptor( name='source', full_name='caffe.WindowDataParameter.source', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='scale', full_name='caffe.WindowDataParameter.scale', index=1, number=2, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mean_file', full_name='caffe.WindowDataParameter.mean_file', index=2, number=3, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4137,21 +4443,21 @@ _descriptor.FieldDescriptor( name='fg_threshold', full_name='caffe.WindowDataParameter.fg_threshold', index=6, number=7, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, 
containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bg_threshold', full_name='caffe.WindowDataParameter.bg_threshold', index=7, number=8, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='fg_fraction', full_name='caffe.WindowDataParameter.fg_fraction', index=8, number=9, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.25, + has_default_value=True, default_value=float(0.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4165,7 +4471,7 @@ _descriptor.FieldDescriptor( name='crop_mode', full_name='caffe.WindowDataParameter.crop_mode', index=10, number=11, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("warp", "utf-8"), + has_default_value=True, default_value=_b("warp").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4179,7 +4485,7 @@ _descriptor.FieldDescriptor( name='root_folder', full_name='caffe.WindowDataParameter.root_folder', index=12, number=13, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("", "utf-8"), + has_default_value=True, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4191,9 +4497,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=10786, - serialized_end=11107, + oneofs=[ + ], + serialized_start=11181, + serialized_end=11502, ) @@ -4235,9 +4544,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=11110, - serialized_end=11345, + oneofs=[ + ], + serialized_start=11505, + serialized_end=11740, ) @@ -4265,7 +4577,7 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.V1LayerParameter.name', index=2, number=4, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4559,9 +4871,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=11348, - serialized_end=13876, + oneofs=[ + ], + serialized_start=11743, + serialized_end=14271, ) @@ -4575,14 +4890,14 @@ _descriptor.FieldDescriptor( name='name', full_name='caffe.V0LayerParameter.name', index=0, number=1, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='type', full_name='caffe.V0LayerParameter.type', index=1, number=2, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4652,7 +4967,7 @@ _descriptor.FieldDescriptor( name='dropout_ratio', full_name='caffe.V0LayerParameter.dropout_ratio', 
index=11, number=12, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4666,42 +4981,42 @@ _descriptor.FieldDescriptor( name='alpha', full_name='caffe.V0LayerParameter.alpha', index=13, number=14, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='beta', full_name='caffe.V0LayerParameter.beta', index=14, number=15, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.75, + has_default_value=True, default_value=float(0.75), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='k', full_name='caffe.V0LayerParameter.k', index=15, number=22, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='source', full_name='caffe.V0LayerParameter.source', index=16, number=16, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='scale', full_name='caffe.V0LayerParameter.scale', index=17, number=17, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=1, + has_default_value=True, default_value=float(1), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='meanfile', full_name='caffe.V0LayerParameter.meanfile', index=18, number=18, type=9, cpp_type=9, label=1, - has_default_value=False, default_value=str("", "utf-8"), + has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4757,21 +5072,21 @@ _descriptor.FieldDescriptor( name='det_fg_threshold', full_name='caffe.V0LayerParameter.det_fg_threshold', index=26, number=54, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='det_bg_threshold', full_name='caffe.V0LayerParameter.det_bg_threshold', index=27, number=55, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.5, + has_default_value=True, default_value=float(0.5), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='det_fg_fraction', full_name='caffe.V0LayerParameter.det_fg_fraction', index=28, number=56, type=2, cpp_type=6, label=1, - has_default_value=True, default_value=0.25, + has_default_value=True, default_value=float(0.25), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4785,7 +5100,7 @@ 
_descriptor.FieldDescriptor( name='det_crop_mode', full_name='caffe.V0LayerParameter.det_crop_mode', index=30, number=59, type=9, cpp_type=9, label=1, - has_default_value=True, default_value=str("warp", "utf-8"), + has_default_value=True, default_value=_b("warp").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), @@ -4847,9 +5162,12 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=13879, - serialized_end=14900, + oneofs=[ + ], + serialized_start=14274, + serialized_end=15295, ) @@ -4882,15 +5200,18 @@ ], options=None, is_extendable=False, + syntax='proto2', extension_ranges=[], - serialized_start=14902, - serialized_end=14989, + oneofs=[ + ], + serialized_start=15297, + serialized_end=15384, ) _BLOBPROTO.fields_by_name['shape'].message_type = _BLOBSHAPE _BLOBPROTOVECTOR.fields_by_name['blobs'].message_type = _BLOBPROTO _FILLERPARAMETER.fields_by_name['variance_norm'].enum_type = _FILLERPARAMETER_VARIANCENORM -_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER; +_FILLERPARAMETER_VARIANCENORM.containing_type = _FILLERPARAMETER _NETPARAMETER.fields_by_name['input_shape'].message_type = _BLOBSHAPE _NETPARAMETER.fields_by_name['state'].message_type = _NETSTATE _NETPARAMETER.fields_by_name['layer'].message_type = _LAYERPARAMETER @@ -4903,14 +5224,14 @@ _SOLVERPARAMETER.fields_by_name['snapshot_format'].enum_type = _SOLVERPARAMETER_SNAPSHOTFORMAT _SOLVERPARAMETER.fields_by_name['solver_mode'].enum_type = _SOLVERPARAMETER_SOLVERMODE _SOLVERPARAMETER.fields_by_name['solver_type'].enum_type = _SOLVERPARAMETER_SOLVERTYPE -_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER; -_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER; -_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER; +_SOLVERPARAMETER_SNAPSHOTFORMAT.containing_type = _SOLVERPARAMETER +_SOLVERPARAMETER_SOLVERMODE.containing_type = _SOLVERPARAMETER +_SOLVERPARAMETER_SOLVERTYPE.containing_type = _SOLVERPARAMETER _SOLVERSTATE.fields_by_name['history'].message_type = _BLOBPROTO _NETSTATE.fields_by_name['phase'].enum_type = _PHASE _NETSTATERULE.fields_by_name['phase'].enum_type = _PHASE _PARAMSPEC.fields_by_name['share_mode'].enum_type = _PARAMSPEC_DIMCHECKMODE -_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC; +_PARAMSPEC_DIMCHECKMODE.containing_type = _PARAMSPEC _LAYERPARAMETER.fields_by_name['phase'].enum_type = _PHASE _LAYERPARAMETER.fields_by_name['param'].message_type = _PARAMSPEC _LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO @@ -4945,10 +5266,12 @@ _LAYERPARAMETER.fields_by_name['lrn_param'].message_type = _LRNPARAMETER _LAYERPARAMETER.fields_by_name['memory_data_param'].message_type = _MEMORYDATAPARAMETER _LAYERPARAMETER.fields_by_name['mvn_param'].message_type = _MVNPARAMETER +_LAYERPARAMETER.fields_by_name['parameter_param'].message_type = _PARAMETERPARAMETER _LAYERPARAMETER.fields_by_name['pooling_param'].message_type = _POOLINGPARAMETER _LAYERPARAMETER.fields_by_name['power_param'].message_type = _POWERPARAMETER _LAYERPARAMETER.fields_by_name['prelu_param'].message_type = _PRELUPARAMETER _LAYERPARAMETER.fields_by_name['python_param'].message_type = _PYTHONPARAMETER +_LAYERPARAMETER.fields_by_name['recurrent_param'].message_type = _RECURRENTPARAMETER _LAYERPARAMETER.fields_by_name['reduction_param'].message_type = _REDUCTIONPARAMETER _LAYERPARAMETER.fields_by_name['relu_param'].message_type = _RELUPARAMETER 
_LAYERPARAMETER.fields_by_name['reshape_param'].message_type = _RESHAPEPARAMETER @@ -4962,50 +5285,53 @@ _LAYERPARAMETER.fields_by_name['tile_param'].message_type = _TILEPARAMETER _LAYERPARAMETER.fields_by_name['window_data_param'].message_type = _WINDOWDATAPARAMETER _LOSSPARAMETER.fields_by_name['normalization'].enum_type = _LOSSPARAMETER_NORMALIZATIONMODE -_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER; +_LOSSPARAMETER_NORMALIZATIONMODE.containing_type = _LOSSPARAMETER _BIASPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER _CONVOLUTIONPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER _CONVOLUTIONPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _CONVOLUTIONPARAMETER.fields_by_name['engine'].enum_type = _CONVOLUTIONPARAMETER_ENGINE -_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER; +_CONVOLUTIONPARAMETER_ENGINE.containing_type = _CONVOLUTIONPARAMETER _DATAPARAMETER.fields_by_name['backend'].enum_type = _DATAPARAMETER_DB -_DATAPARAMETER_DB.containing_type = _DATAPARAMETER; +_DATAPARAMETER_DB.containing_type = _DATAPARAMETER _DUMMYDATAPARAMETER.fields_by_name['data_filler'].message_type = _FILLERPARAMETER _DUMMYDATAPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE _ELTWISEPARAMETER.fields_by_name['operation'].enum_type = _ELTWISEPARAMETER_ELTWISEOP -_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER; +_ELTWISEPARAMETER_ELTWISEOP.containing_type = _ELTWISEPARAMETER _EMBEDPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER _EMBEDPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _HINGELOSSPARAMETER.fields_by_name['norm'].enum_type = _HINGELOSSPARAMETER_NORM -_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER; +_HINGELOSSPARAMETER_NORM.containing_type = _HINGELOSSPARAMETER _INNERPRODUCTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER _INNERPRODUCTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _INPUTPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE _LRNPARAMETER.fields_by_name['norm_region'].enum_type = _LRNPARAMETER_NORMREGION _LRNPARAMETER.fields_by_name['engine'].enum_type = _LRNPARAMETER_ENGINE -_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER; -_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER; +_LRNPARAMETER_NORMREGION.containing_type = _LRNPARAMETER +_LRNPARAMETER_ENGINE.containing_type = _LRNPARAMETER +_PARAMETERPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE _POOLINGPARAMETER.fields_by_name['pool'].enum_type = _POOLINGPARAMETER_POOLMETHOD _POOLINGPARAMETER.fields_by_name['engine'].enum_type = _POOLINGPARAMETER_ENGINE -_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER; -_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER; +_POOLINGPARAMETER_POOLMETHOD.containing_type = _POOLINGPARAMETER +_POOLINGPARAMETER_ENGINE.containing_type = _POOLINGPARAMETER +_RECURRENTPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER +_RECURRENTPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _REDUCTIONPARAMETER.fields_by_name['operation'].enum_type = _REDUCTIONPARAMETER_REDUCTIONOP -_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER; +_REDUCTIONPARAMETER_REDUCTIONOP.containing_type = _REDUCTIONPARAMETER _RELUPARAMETER.fields_by_name['engine'].enum_type = _RELUPARAMETER_ENGINE -_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER; 
+_RELUPARAMETER_ENGINE.containing_type = _RELUPARAMETER _RESHAPEPARAMETER.fields_by_name['shape'].message_type = _BLOBSHAPE _SCALEPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER _SCALEPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _SIGMOIDPARAMETER.fields_by_name['engine'].enum_type = _SIGMOIDPARAMETER_ENGINE -_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER; +_SIGMOIDPARAMETER_ENGINE.containing_type = _SIGMOIDPARAMETER _SOFTMAXPARAMETER.fields_by_name['engine'].enum_type = _SOFTMAXPARAMETER_ENGINE -_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER; +_SOFTMAXPARAMETER_ENGINE.containing_type = _SOFTMAXPARAMETER _TANHPARAMETER.fields_by_name['engine'].enum_type = _TANHPARAMETER_ENGINE -_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER; +_TANHPARAMETER_ENGINE.containing_type = _TANHPARAMETER _SPPPARAMETER.fields_by_name['pool'].enum_type = _SPPPARAMETER_POOLMETHOD _SPPPARAMETER.fields_by_name['engine'].enum_type = _SPPPARAMETER_ENGINE -_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER; -_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER; +_SPPPARAMETER_POOLMETHOD.containing_type = _SPPPARAMETER +_SPPPARAMETER_ENGINE.containing_type = _SPPPARAMETER _V1LAYERPARAMETER.fields_by_name['include'].message_type = _NETSTATERULE _V1LAYERPARAMETER.fields_by_name['exclude'].message_type = _NETSTATERULE _V1LAYERPARAMETER.fields_by_name['type'].enum_type = _V1LAYERPARAMETER_LAYERTYPE @@ -5042,14 +5368,14 @@ _V1LAYERPARAMETER.fields_by_name['transform_param'].message_type = _TRANSFORMATIONPARAMETER _V1LAYERPARAMETER.fields_by_name['loss_param'].message_type = _LOSSPARAMETER _V1LAYERPARAMETER.fields_by_name['layer'].message_type = _V0LAYERPARAMETER -_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER; -_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER; +_V1LAYERPARAMETER_LAYERTYPE.containing_type = _V1LAYERPARAMETER +_V1LAYERPARAMETER_DIMCHECKMODE.containing_type = _V1LAYERPARAMETER _V0LAYERPARAMETER.fields_by_name['weight_filler'].message_type = _FILLERPARAMETER _V0LAYERPARAMETER.fields_by_name['bias_filler'].message_type = _FILLERPARAMETER _V0LAYERPARAMETER.fields_by_name['pool'].enum_type = _V0LAYERPARAMETER_POOLMETHOD _V0LAYERPARAMETER.fields_by_name['blobs'].message_type = _BLOBPROTO _V0LAYERPARAMETER.fields_by_name['hdf5_output_param'].message_type = _HDF5OUTPUTPARAMETER -_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER; +_V0LAYERPARAMETER_POOLMETHOD.containing_type = _V0LAYERPARAMETER _PRELUPARAMETER.fields_by_name['filler'].message_type = _FILLERPARAMETER DESCRIPTOR.message_types_by_name['BlobShape'] = _BLOBSHAPE DESCRIPTOR.message_types_by_name['BlobProto'] = _BLOBPROTO @@ -5092,9 +5418,11 @@ DESCRIPTOR.message_types_by_name['LRNParameter'] = _LRNPARAMETER DESCRIPTOR.message_types_by_name['MemoryDataParameter'] = _MEMORYDATAPARAMETER DESCRIPTOR.message_types_by_name['MVNParameter'] = _MVNPARAMETER +DESCRIPTOR.message_types_by_name['ParameterParameter'] = _PARAMETERPARAMETER DESCRIPTOR.message_types_by_name['PoolingParameter'] = _POOLINGPARAMETER DESCRIPTOR.message_types_by_name['PowerParameter'] = _POWERPARAMETER DESCRIPTOR.message_types_by_name['PythonParameter'] = _PYTHONPARAMETER +DESCRIPTOR.message_types_by_name['RecurrentParameter'] = _RECURRENTPARAMETER DESCRIPTOR.message_types_by_name['ReductionParameter'] = _REDUCTIONPARAMETER DESCRIPTOR.message_types_by_name['ReLUParameter'] = _RELUPARAMETER DESCRIPTOR.message_types_by_name['ReshapeParameter'] = _RESHAPEPARAMETER @@ 
-5110,311 +5438,444 @@ DESCRIPTOR.message_types_by_name['V1LayerParameter'] = _V1LAYERPARAMETER DESCRIPTOR.message_types_by_name['V0LayerParameter'] = _V0LAYERPARAMETER DESCRIPTOR.message_types_by_name['PReLUParameter'] = _PRELUPARAMETER +DESCRIPTOR.enum_types_by_name['Phase'] = _PHASE -class BlobShape(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _BLOBSHAPE - +BlobShape = _reflection.GeneratedProtocolMessageType('BlobShape', (_message.Message,), dict( + DESCRIPTOR = _BLOBSHAPE, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.BlobShape) + )) +_sym_db.RegisterMessage(BlobShape) -class BlobProto(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _BLOBPROTO - +BlobProto = _reflection.GeneratedProtocolMessageType('BlobProto', (_message.Message,), dict( + DESCRIPTOR = _BLOBPROTO, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.BlobProto) + )) +_sym_db.RegisterMessage(BlobProto) -class BlobProtoVector(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _BLOBPROTOVECTOR - +BlobProtoVector = _reflection.GeneratedProtocolMessageType('BlobProtoVector', (_message.Message,), dict( + DESCRIPTOR = _BLOBPROTOVECTOR, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.BlobProtoVector) + )) +_sym_db.RegisterMessage(BlobProtoVector) -class Datum(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _DATUM - +Datum = _reflection.GeneratedProtocolMessageType('Datum', (_message.Message,), dict( + DESCRIPTOR = _DATUM, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.Datum) + )) +_sym_db.RegisterMessage(Datum) -class FillerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _FILLERPARAMETER - +FillerParameter = _reflection.GeneratedProtocolMessageType('FillerParameter', (_message.Message,), dict( + DESCRIPTOR = _FILLERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.FillerParameter) + )) +_sym_db.RegisterMessage(FillerParameter) -class NetParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _NETPARAMETER - +NetParameter = _reflection.GeneratedProtocolMessageType('NetParameter', (_message.Message,), dict( + DESCRIPTOR = _NETPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.NetParameter) + )) +_sym_db.RegisterMessage(NetParameter) -class SolverParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SOLVERPARAMETER - +SolverParameter = _reflection.GeneratedProtocolMessageType('SolverParameter', (_message.Message,), dict( + DESCRIPTOR = _SOLVERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SolverParameter) + )) +_sym_db.RegisterMessage(SolverParameter) -class SolverState(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SOLVERSTATE - +SolverState = _reflection.GeneratedProtocolMessageType('SolverState', (_message.Message,), dict( + DESCRIPTOR = _SOLVERSTATE, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SolverState) + )) +_sym_db.RegisterMessage(SolverState) -class NetState(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _NETSTATE - +NetState = _reflection.GeneratedProtocolMessageType('NetState', (_message.Message,), dict( + DESCRIPTOR = _NETSTATE, + __module__ = 
'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.NetState) + )) +_sym_db.RegisterMessage(NetState) -class NetStateRule(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _NETSTATERULE - +NetStateRule = _reflection.GeneratedProtocolMessageType('NetStateRule', (_message.Message,), dict( + DESCRIPTOR = _NETSTATERULE, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.NetStateRule) + )) +_sym_db.RegisterMessage(NetStateRule) -class ParamSpec(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _PARAMSPEC - +ParamSpec = _reflection.GeneratedProtocolMessageType('ParamSpec', (_message.Message,), dict( + DESCRIPTOR = _PARAMSPEC, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ParamSpec) + )) +_sym_db.RegisterMessage(ParamSpec) -class LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _LAYERPARAMETER - +LayerParameter = _reflection.GeneratedProtocolMessageType('LayerParameter', (_message.Message,), dict( + DESCRIPTOR = _LAYERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.LayerParameter) + )) +_sym_db.RegisterMessage(LayerParameter) -class TransformationParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _TRANSFORMATIONPARAMETER - +TransformationParameter = _reflection.GeneratedProtocolMessageType('TransformationParameter', (_message.Message,), dict( + DESCRIPTOR = _TRANSFORMATIONPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.TransformationParameter) + )) +_sym_db.RegisterMessage(TransformationParameter) -class LossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _LOSSPARAMETER - +LossParameter = _reflection.GeneratedProtocolMessageType('LossParameter', (_message.Message,), dict( + DESCRIPTOR = _LOSSPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.LossParameter) + )) +_sym_db.RegisterMessage(LossParameter) -class AccuracyParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _ACCURACYPARAMETER - +AccuracyParameter = _reflection.GeneratedProtocolMessageType('AccuracyParameter', (_message.Message,), dict( + DESCRIPTOR = _ACCURACYPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.AccuracyParameter) + )) +_sym_db.RegisterMessage(AccuracyParameter) -class ArgMaxParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _ARGMAXPARAMETER - +ArgMaxParameter = _reflection.GeneratedProtocolMessageType('ArgMaxParameter', (_message.Message,), dict( + DESCRIPTOR = _ARGMAXPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ArgMaxParameter) + )) +_sym_db.RegisterMessage(ArgMaxParameter) -class ConcatParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _CONCATPARAMETER - +ConcatParameter = _reflection.GeneratedProtocolMessageType('ConcatParameter', (_message.Message,), dict( + DESCRIPTOR = _CONCATPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ConcatParameter) + )) +_sym_db.RegisterMessage(ConcatParameter) -class BatchNormParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _BATCHNORMPARAMETER - +BatchNormParameter = _reflection.GeneratedProtocolMessageType('BatchNormParameter', 
(_message.Message,), dict( + DESCRIPTOR = _BATCHNORMPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.BatchNormParameter) + )) +_sym_db.RegisterMessage(BatchNormParameter) -class BiasParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _BIASPARAMETER - +BiasParameter = _reflection.GeneratedProtocolMessageType('BiasParameter', (_message.Message,), dict( + DESCRIPTOR = _BIASPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.BiasParameter) + )) +_sym_db.RegisterMessage(BiasParameter) -class ContrastiveLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _CONTRASTIVELOSSPARAMETER - +ContrastiveLossParameter = _reflection.GeneratedProtocolMessageType('ContrastiveLossParameter', (_message.Message,), dict( + DESCRIPTOR = _CONTRASTIVELOSSPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ContrastiveLossParameter) + )) +_sym_db.RegisterMessage(ContrastiveLossParameter) -class ConvolutionParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _CONVOLUTIONPARAMETER - +ConvolutionParameter = _reflection.GeneratedProtocolMessageType('ConvolutionParameter', (_message.Message,), dict( + DESCRIPTOR = _CONVOLUTIONPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ConvolutionParameter) + )) +_sym_db.RegisterMessage(ConvolutionParameter) -class CropParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _CROPPARAMETER - +CropParameter = _reflection.GeneratedProtocolMessageType('CropParameter', (_message.Message,), dict( + DESCRIPTOR = _CROPPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.CropParameter) + )) +_sym_db.RegisterMessage(CropParameter) -class DataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _DATAPARAMETER - +DataParameter = _reflection.GeneratedProtocolMessageType('DataParameter', (_message.Message,), dict( + DESCRIPTOR = _DATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.DataParameter) + )) +_sym_db.RegisterMessage(DataParameter) -class DropoutParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _DROPOUTPARAMETER - +DropoutParameter = _reflection.GeneratedProtocolMessageType('DropoutParameter', (_message.Message,), dict( + DESCRIPTOR = _DROPOUTPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.DropoutParameter) + )) +_sym_db.RegisterMessage(DropoutParameter) -class DummyDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _DUMMYDATAPARAMETER - +DummyDataParameter = _reflection.GeneratedProtocolMessageType('DummyDataParameter', (_message.Message,), dict( + DESCRIPTOR = _DUMMYDATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.DummyDataParameter) + )) +_sym_db.RegisterMessage(DummyDataParameter) -class EltwiseParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _ELTWISEPARAMETER - +EltwiseParameter = _reflection.GeneratedProtocolMessageType('EltwiseParameter', (_message.Message,), dict( + DESCRIPTOR = _ELTWISEPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.EltwiseParameter) + )) +_sym_db.RegisterMessage(EltwiseParameter) -class 
ELUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _ELUPARAMETER - +ELUParameter = _reflection.GeneratedProtocolMessageType('ELUParameter', (_message.Message,), dict( + DESCRIPTOR = _ELUPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ELUParameter) + )) +_sym_db.RegisterMessage(ELUParameter) -class EmbedParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _EMBEDPARAMETER - +EmbedParameter = _reflection.GeneratedProtocolMessageType('EmbedParameter', (_message.Message,), dict( + DESCRIPTOR = _EMBEDPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.EmbedParameter) + )) +_sym_db.RegisterMessage(EmbedParameter) -class ExpParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _EXPPARAMETER - +ExpParameter = _reflection.GeneratedProtocolMessageType('ExpParameter', (_message.Message,), dict( + DESCRIPTOR = _EXPPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ExpParameter) + )) +_sym_db.RegisterMessage(ExpParameter) -class FlattenParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _FLATTENPARAMETER - +FlattenParameter = _reflection.GeneratedProtocolMessageType('FlattenParameter', (_message.Message,), dict( + DESCRIPTOR = _FLATTENPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.FlattenParameter) + )) +_sym_db.RegisterMessage(FlattenParameter) -class HDF5DataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _HDF5DATAPARAMETER - +HDF5DataParameter = _reflection.GeneratedProtocolMessageType('HDF5DataParameter', (_message.Message,), dict( + DESCRIPTOR = _HDF5DATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.HDF5DataParameter) + )) +_sym_db.RegisterMessage(HDF5DataParameter) -class HDF5OutputParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _HDF5OUTPUTPARAMETER - +HDF5OutputParameter = _reflection.GeneratedProtocolMessageType('HDF5OutputParameter', (_message.Message,), dict( + DESCRIPTOR = _HDF5OUTPUTPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.HDF5OutputParameter) + )) +_sym_db.RegisterMessage(HDF5OutputParameter) -class HingeLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _HINGELOSSPARAMETER - +HingeLossParameter = _reflection.GeneratedProtocolMessageType('HingeLossParameter', (_message.Message,), dict( + DESCRIPTOR = _HINGELOSSPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.HingeLossParameter) + )) +_sym_db.RegisterMessage(HingeLossParameter) -class ImageDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _IMAGEDATAPARAMETER - +ImageDataParameter = _reflection.GeneratedProtocolMessageType('ImageDataParameter', (_message.Message,), dict( + DESCRIPTOR = _IMAGEDATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ImageDataParameter) + )) +_sym_db.RegisterMessage(ImageDataParameter) -class InfogainLossParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _INFOGAINLOSSPARAMETER - +InfogainLossParameter = _reflection.GeneratedProtocolMessageType('InfogainLossParameter', (_message.Message,), dict( + DESCRIPTOR = 
_INFOGAINLOSSPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.InfogainLossParameter) + )) +_sym_db.RegisterMessage(InfogainLossParameter) -class InnerProductParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _INNERPRODUCTPARAMETER - +InnerProductParameter = _reflection.GeneratedProtocolMessageType('InnerProductParameter', (_message.Message,), dict( + DESCRIPTOR = _INNERPRODUCTPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.InnerProductParameter) + )) +_sym_db.RegisterMessage(InnerProductParameter) -class InputParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _INPUTPARAMETER - +InputParameter = _reflection.GeneratedProtocolMessageType('InputParameter', (_message.Message,), dict( + DESCRIPTOR = _INPUTPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.InputParameter) + )) +_sym_db.RegisterMessage(InputParameter) -class LogParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _LOGPARAMETER - +LogParameter = _reflection.GeneratedProtocolMessageType('LogParameter', (_message.Message,), dict( + DESCRIPTOR = _LOGPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.LogParameter) + )) +_sym_db.RegisterMessage(LogParameter) -class LRNParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _LRNPARAMETER - +LRNParameter = _reflection.GeneratedProtocolMessageType('LRNParameter', (_message.Message,), dict( + DESCRIPTOR = _LRNPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.LRNParameter) + )) +_sym_db.RegisterMessage(LRNParameter) -class MemoryDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _MEMORYDATAPARAMETER - +MemoryDataParameter = _reflection.GeneratedProtocolMessageType('MemoryDataParameter', (_message.Message,), dict( + DESCRIPTOR = _MEMORYDATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.MemoryDataParameter) + )) +_sym_db.RegisterMessage(MemoryDataParameter) -class MVNParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _MVNPARAMETER - +MVNParameter = _reflection.GeneratedProtocolMessageType('MVNParameter', (_message.Message,), dict( + DESCRIPTOR = _MVNPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.MVNParameter) - -class PoolingParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _POOLINGPARAMETER - + )) +_sym_db.RegisterMessage(MVNParameter) + +ParameterParameter = _reflection.GeneratedProtocolMessageType('ParameterParameter', (_message.Message,), dict( + DESCRIPTOR = _PARAMETERPARAMETER, + __module__ = 'caffe_pb2' + # @@protoc_insertion_point(class_scope:caffe.ParameterParameter) + )) +_sym_db.RegisterMessage(ParameterParameter) + +PoolingParameter = _reflection.GeneratedProtocolMessageType('PoolingParameter', (_message.Message,), dict( + DESCRIPTOR = _POOLINGPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.PoolingParameter) + )) +_sym_db.RegisterMessage(PoolingParameter) -class PowerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _POWERPARAMETER - +PowerParameter = _reflection.GeneratedProtocolMessageType('PowerParameter', (_message.Message,), dict( + DESCRIPTOR = 
_POWERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.PowerParameter) + )) +_sym_db.RegisterMessage(PowerParameter) -class PythonParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _PYTHONPARAMETER - +PythonParameter = _reflection.GeneratedProtocolMessageType('PythonParameter', (_message.Message,), dict( + DESCRIPTOR = _PYTHONPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.PythonParameter) - -class ReductionParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _REDUCTIONPARAMETER - + )) +_sym_db.RegisterMessage(PythonParameter) + +RecurrentParameter = _reflection.GeneratedProtocolMessageType('RecurrentParameter', (_message.Message,), dict( + DESCRIPTOR = _RECURRENTPARAMETER, + __module__ = 'caffe_pb2' + # @@protoc_insertion_point(class_scope:caffe.RecurrentParameter) + )) +_sym_db.RegisterMessage(RecurrentParameter) + +ReductionParameter = _reflection.GeneratedProtocolMessageType('ReductionParameter', (_message.Message,), dict( + DESCRIPTOR = _REDUCTIONPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ReductionParameter) + )) +_sym_db.RegisterMessage(ReductionParameter) -class ReLUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _RELUPARAMETER - +ReLUParameter = _reflection.GeneratedProtocolMessageType('ReLUParameter', (_message.Message,), dict( + DESCRIPTOR = _RELUPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ReLUParameter) + )) +_sym_db.RegisterMessage(ReLUParameter) -class ReshapeParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _RESHAPEPARAMETER - +ReshapeParameter = _reflection.GeneratedProtocolMessageType('ReshapeParameter', (_message.Message,), dict( + DESCRIPTOR = _RESHAPEPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ReshapeParameter) + )) +_sym_db.RegisterMessage(ReshapeParameter) -class ScaleParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SCALEPARAMETER - +ScaleParameter = _reflection.GeneratedProtocolMessageType('ScaleParameter', (_message.Message,), dict( + DESCRIPTOR = _SCALEPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ScaleParameter) + )) +_sym_db.RegisterMessage(ScaleParameter) -class SigmoidParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SIGMOIDPARAMETER - +SigmoidParameter = _reflection.GeneratedProtocolMessageType('SigmoidParameter', (_message.Message,), dict( + DESCRIPTOR = _SIGMOIDPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SigmoidParameter) + )) +_sym_db.RegisterMessage(SigmoidParameter) -class SliceParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SLICEPARAMETER - +SliceParameter = _reflection.GeneratedProtocolMessageType('SliceParameter', (_message.Message,), dict( + DESCRIPTOR = _SLICEPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SliceParameter) + )) +_sym_db.RegisterMessage(SliceParameter) -class SoftmaxParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SOFTMAXPARAMETER - +SoftmaxParameter = _reflection.GeneratedProtocolMessageType('SoftmaxParameter', (_message.Message,), dict( + DESCRIPTOR = _SOFTMAXPARAMETER, + 
__module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SoftmaxParameter) + )) +_sym_db.RegisterMessage(SoftmaxParameter) -class TanHParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _TANHPARAMETER - +TanHParameter = _reflection.GeneratedProtocolMessageType('TanHParameter', (_message.Message,), dict( + DESCRIPTOR = _TANHPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.TanHParameter) + )) +_sym_db.RegisterMessage(TanHParameter) -class TileParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _TILEPARAMETER - +TileParameter = _reflection.GeneratedProtocolMessageType('TileParameter', (_message.Message,), dict( + DESCRIPTOR = _TILEPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.TileParameter) + )) +_sym_db.RegisterMessage(TileParameter) -class ThresholdParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _THRESHOLDPARAMETER - +ThresholdParameter = _reflection.GeneratedProtocolMessageType('ThresholdParameter', (_message.Message,), dict( + DESCRIPTOR = _THRESHOLDPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.ThresholdParameter) + )) +_sym_db.RegisterMessage(ThresholdParameter) -class WindowDataParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _WINDOWDATAPARAMETER - +WindowDataParameter = _reflection.GeneratedProtocolMessageType('WindowDataParameter', (_message.Message,), dict( + DESCRIPTOR = _WINDOWDATAPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.WindowDataParameter) + )) +_sym_db.RegisterMessage(WindowDataParameter) -class SPPParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _SPPPARAMETER - +SPPParameter = _reflection.GeneratedProtocolMessageType('SPPParameter', (_message.Message,), dict( + DESCRIPTOR = _SPPPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.SPPParameter) + )) +_sym_db.RegisterMessage(SPPParameter) -class V1LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _V1LAYERPARAMETER - +V1LayerParameter = _reflection.GeneratedProtocolMessageType('V1LayerParameter', (_message.Message,), dict( + DESCRIPTOR = _V1LAYERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.V1LayerParameter) + )) +_sym_db.RegisterMessage(V1LayerParameter) -class V0LayerParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _V0LAYERPARAMETER - +V0LayerParameter = _reflection.GeneratedProtocolMessageType('V0LayerParameter', (_message.Message,), dict( + DESCRIPTOR = _V0LAYERPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.V0LayerParameter) + )) +_sym_db.RegisterMessage(V0LayerParameter) -class PReLUParameter(_message.Message, metaclass=_reflection.GeneratedProtocolMessageType): - DESCRIPTOR = _PRELUPARAMETER - +PReLUParameter = _reflection.GeneratedProtocolMessageType('PReLUParameter', (_message.Message,), dict( + DESCRIPTOR = _PRELUPARAMETER, + __module__ = 'caffe_pb2' # @@protoc_insertion_point(class_scope:caffe.PReLUParameter) + )) +_sym_db.RegisterMessage(PReLUParameter) _BLOBSHAPE.fields_by_name['dim'].has_options = True -_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001') 
+_BLOBSHAPE.fields_by_name['dim']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) _BLOBPROTO.fields_by_name['data'].has_options = True -_BLOBPROTO.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001') +_BLOBPROTO.fields_by_name['data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) _BLOBPROTO.fields_by_name['diff'].has_options = True -_BLOBPROTO.fields_by_name['diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001') +_BLOBPROTO.fields_by_name['diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) _BLOBPROTO.fields_by_name['double_data'].has_options = True -_BLOBPROTO.fields_by_name['double_data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001') +_BLOBPROTO.fields_by_name['double_data']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) _BLOBPROTO.fields_by_name['double_diff'].has_options = True -_BLOBPROTO.fields_by_name['double_diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), '\020\001') +_BLOBPROTO.fields_by_name['double_diff']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001')) # @@protoc_insertion_point(module_scope) diff --git a/kaffe/caffe/resolver.py b/kaffe/caffe/resolver.py index b9580a7..3c68dda 100644 --- a/kaffe/caffe/resolver.py +++ b/kaffe/caffe/resolver.py @@ -8,19 +8,10 @@ def __init__(self): def import_caffe(self): self.caffe = None - try: - # Try to import PyCaffe first - import caffe - self.caffe = caffe - except ImportError: - # Fall back to the protobuf implementation - from . import caffepb - self.caffepb = caffepb - show_fallback_warning() - if self.caffe: - # Use the protobuf code from the imported distribution. - # This way, Caffe variants with custom layers will work. - self.caffepb = self.caffe.proto.caffe_pb2 + # Fall back to the protobuf implementation + from . import caffepb + self.caffepb = caffepb + show_fallback_warning() self.NetParameter = self.caffepb.NetParameter def has_pycaffe(self): diff --git a/kaffe/core.py b/kaffe/core.py index 3a39d3b..7bf8c83 100644 --- a/kaffe/core.py +++ b/kaffe/core.py @@ -278,7 +278,7 @@ def filter_layers(self, layers): return filtered_layers def make_node(self, layer): - kind = NodeKind.map_raw_kind(layer.type) + kind = NodeKind.map_raw_kind(layer) if kind is None: raise KaffeError('Unknown layer type encountered: %s'%layer.type) return Node(layer.name, kind, layer=layer) diff --git a/kaffe/graph.py b/kaffe/graph.py index bec2b3a..2b35171 100644 --- a/kaffe/graph.py +++ b/kaffe/graph.py @@ -1,6 +1,6 @@ from google.protobuf import text_format -from .caffe import get_caffe_resolver +from .caffe import get_caffe_resolver, caffepb from .errors import KaffeError, print_stderr from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch from .shapes import TensorShape @@ -114,15 +114,16 @@ def __contains__(self, key): return key in self.node_lut def __str__(self): - hdr = '{:<20} {:<30} {:>20} {:>20}'.format('Type', 'Name', 'Param', 'Output') + hdr = '{:<20} {:<30} {:>20} {:>20}'.format( + 'Type', 'Name', 'Param', 'Output') s = [hdr, '-' * 94] for node in self.topologically_sorted(): # If the node has learned parameters, display the first one's shape. # In case of convolutions, this corresponds to the weights. 
- data_shape = node.data[0].shape if node.data else '--' - out_shape = node.output_shape or '--' - s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, node.name, data_shape, - tuple(out_shape))) + data_shape = str(node.data[0].shape) if node.data else '--' + out_shape = str(tuple(node.output_shape)) or '--' + s.append('{:<20} {:<30} {:>20} {:>20}'.format( + node.kind, node.name, data_shape, out_shape)) return '\n'.join(s) @@ -142,7 +143,7 @@ def __init__(self, def_path, phase='test'): def load(self): '''Load the layer definitions from the prototxt.''' self.params = get_caffe_resolver().NetParameter() - with open(self.def_path, 'rb') as def_file: + with open(self.def_path, 'r') as def_file: text_format.Merge(def_file.read(), self.params) def filter_layers(self, layers): @@ -171,7 +172,7 @@ def filter_layers(self, layers): def make_node(self, layer): '''Create a graph node for the given layer.''' - kind = NodeKind.map_raw_kind(layer.type) + kind = NodeKind.map_raw_kind(layer) if kind is None: raise KaffeError('Unknown layer type encountered: %s' % layer.type) # We want to use the layer's top names (the "output" names), rather than the diff --git a/kaffe/layers.py b/kaffe/layers.py index 1154237..da579c1 100644 --- a/kaffe/layers.py +++ b/kaffe/layers.py @@ -1,6 +1,7 @@ import re import numbers from collections import namedtuple +from .caffe import caffepb from .shapes import * @@ -51,6 +52,48 @@ 'Threshold': shape_identity, } +V1_TO_NEW = { + 35: 'AbsVal', + 1: 'Accuracy', + 30: 'ArgMax', + 2: 'BNLL', + 3: 'Concat', + 37: 'ContrastiveLoss', + 4: 'Convolution', + 5: 'Data', + 39: 'Deconvolution', + 6: 'Dropout', + 32: 'DummyData', + 7: 'EuclideanLoss', + 25: 'Eltwise', + 38: 'Exp', + 8: 'Flatten', + 9: 'HDF5Data', + 10: 'HDF5Output', + 28: 'HingeLoss', + 11: 'Im2col', + 12: 'ImageData', + 13: 'InfogainLoss', + 14: 'InnerProduct', + 15: 'LRN', + 29: 'MemoryData', + 16: 'MultinomialLogisticLoss', + 34: 'MVN', + 17: 'Pooling', + 26: 'Power', + 18: 'ReLU', + 19: 'Sigmoid', + 27: 'SigmoidCrossEntropyLoss', + 36: 'Silence', + 20: 'Softmax', + 21: 'SoftmaxLoss', + 22: 'Split', + 33: 'Slice', + 23: 'TanH', + 24: 'WindowData', + 31: 'Threshold', +} + LAYER_TYPES = list(LAYER_DESCRIPTORS.keys()) LayerType = type('LayerType', (), {t: t for t in LAYER_TYPES}) @@ -58,7 +101,10 @@ class NodeKind(LayerType): @staticmethod - def map_raw_kind(kind): + def map_raw_kind(layer): + kind = layer.type + if isinstance(layer, caffepb.V1LayerParameter): + kind = V1_TO_NEW[layer.type] if kind in LAYER_TYPES: return kind return None diff --git a/kaffe/tensorflow/transformer.py b/kaffe/tensorflow/transformer.py index 34bfc9a..6f81ee2 100644 --- a/kaffe/tensorflow/transformer.py +++ b/kaffe/tensorflow/transformer.py @@ -44,7 +44,7 @@ def __init__(self, op, *args, **kwargs): def format(self, arg): '''Returns a string representation for the given value.''' - return "'%s'" % arg if isinstance(arg, basestring) else str(arg) + return "'%s'" % arg if isinstance(arg, str) else str(arg) def pair(self, key, value): '''Returns key=formatted(value).''' @@ -53,7 +53,7 @@ def pair(self, key, value): def emit(self): '''Emits the Python source for this node.''' # Format positional arguments - args = map(self.format, self.args) + args = list(map(self.format, self.args)) # Format any keyword arguments if self.kwargs: args += [self.pair(k, v) for k, v in self.kwargs] From 9fd9effd5b1d5231a474ab881e0cec664d0aef23 Mon Sep 17 00:00:00 2001 From: "eliot.andres" Date: Mon, 22 May 2017 11:33:34 +0200 Subject: [PATCH 06/11] Fix issue. 
--- kaffe/caffe/{caffepb.py => caffe_pb2.py} | 0 kaffe/caffe/resolver.py | 4 ++-- kaffe/graph.py | 2 +- kaffe/layers.py | 4 ++-- 4 files changed, 5 insertions(+), 5 deletions(-) rename kaffe/caffe/{caffepb.py => caffe_pb2.py} (100%) diff --git a/kaffe/caffe/caffepb.py b/kaffe/caffe/caffe_pb2.py similarity index 100% rename from kaffe/caffe/caffepb.py rename to kaffe/caffe/caffe_pb2.py diff --git a/kaffe/caffe/resolver.py b/kaffe/caffe/resolver.py index 3c68dda..4703b51 100644 --- a/kaffe/caffe/resolver.py +++ b/kaffe/caffe/resolver.py @@ -9,8 +9,8 @@ def __init__(self): def import_caffe(self): self.caffe = None # Fall back to the protobuf implementation - from . import caffepb - self.caffepb = caffepb + from . import caffe_pb2 + self.caffepb = caffe_pb2 show_fallback_warning() self.NetParameter = self.caffepb.NetParameter diff --git a/kaffe/graph.py b/kaffe/graph.py index 2b35171..21f9344 100644 --- a/kaffe/graph.py +++ b/kaffe/graph.py @@ -1,6 +1,6 @@ from google.protobuf import text_format -from .caffe import get_caffe_resolver, caffepb +from .caffe import get_caffe_resolver, caffe_pb2 from .errors import KaffeError, print_stderr from .layers import LayerAdapter, LayerType, NodeKind, NodeDispatch from .shapes import TensorShape diff --git a/kaffe/layers.py b/kaffe/layers.py index da579c1..4630968 100644 --- a/kaffe/layers.py +++ b/kaffe/layers.py @@ -1,7 +1,7 @@ import re import numbers from collections import namedtuple -from .caffe import caffepb +from .caffe import caffe_pb2 from .shapes import * @@ -103,7 +103,7 @@ class NodeKind(LayerType): @staticmethod def map_raw_kind(layer): kind = layer.type - if isinstance(layer, caffepb.V1LayerParameter): + if isinstance(layer, caffe_pb2.V1LayerParameter): kind = V1_TO_NEW[layer.type] if kind in LAYER_TYPES: return kind From d2545ef1ffd9f17b2f6b2e7b8350efe1e5f220dc Mon Sep 17 00:00:00 2001 From: "eliot.andres" Date: Mon, 29 May 2017 18:00:22 +0200 Subject: [PATCH 07/11] Update for Tensorflow 1.1. --- kaffe/caffe/{caffepb.py => caffe_pb2.py} | 0 kaffe/caffe/resolver.py | 4 ++-- kaffe/tensorflow/network.py | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename kaffe/caffe/{caffepb.py => caffe_pb2.py} (100%) diff --git a/kaffe/caffe/caffepb.py b/kaffe/caffe/caffe_pb2.py similarity index 100% rename from kaffe/caffe/caffepb.py rename to kaffe/caffe/caffe_pb2.py diff --git a/kaffe/caffe/resolver.py b/kaffe/caffe/resolver.py index b9580a7..3de3ea2 100644 --- a/kaffe/caffe/resolver.py +++ b/kaffe/caffe/resolver.py @@ -14,8 +14,8 @@ def import_caffe(self): self.caffe = caffe except ImportError: # Fall back to the protobuf implementation - from . import caffepb - self.caffepb = caffepb + from . import caffe_pb2 + self.caffepb = caffe_pb2 show_fallback_warning() if self.caffe: # Use the protobuf code from the imported distribution. diff --git a/kaffe/tensorflow/network.py b/kaffe/tensorflow/network.py index 6f3b153..3f7f023 100644 --- a/kaffe/tensorflow/network.py +++ b/kaffe/tensorflow/network.py @@ -177,7 +177,7 @@ def lrn(self, input, radius, alpha, beta, name, bias=1.0): @layer def concat(self, inputs, axis, name): - return tf.concat(concat_dim=axis, values=inputs, name=name) + return tf.concat(values=inputs, axis=axis, name=name) @layer def add(self, inputs, name): From 1a5c027b19c6e9d4bc5f2cb4d5906efe46c60466 Mon Sep 17 00:00:00 2001 From: Eliot Andres Date: Mon, 29 May 2017 18:54:22 +0200 Subject: [PATCH 08/11] Update for Standalone use. 
--- README.md | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/README.md b/README.md index 5e3a6bf..21d3bf6 100644 --- a/README.md +++ b/README.md @@ -2,6 +2,24 @@ Convert [Caffe](https://github.com/BVLC/caffe/) models to [TensorFlow](https://github.com/tensorflow/tensorflow). +## Usage with standalone model + +### 1 - Install caffe-tensorflow + git clone https://github.com/linkfluence/caffe-tensorflow + # Optional: create a Python 2.7 env and activate it + # This fork has only been tested with Python 2.7 + +### 2 - (Optional) Switch to Tensorflow CPU +You might run into memory issues if you don't have enough GPU memory. In this case, just uninstall `tensorflow-gpu` and install `tensorflow`. + +### 3 - Convert your model + python convert.py --caffemodel ./model.caffemodel ./model.prototxt --data-output-path ./output.mat --code-output-path ./output.py --standalone-output-path ./standalone.pb + +### 4 - (Optional) Re-install Tensorflow GPU + +### 5 - Use the standalone.pb file +It contains the weights and the architecture of the network. + ## Usage Run `convert.py` to convert an existing Caffe model to TensorFlow. From 78ba4bb200f68bd730daeeed1c17fff2d4647291 Mon Sep 17 00:00:00 2001 From: Paul Bauriegel Date: Thu, 18 Jul 2019 00:15:00 +0300 Subject: [PATCH 09/11] Add additional freezing option --- .gitignore | 4 ++++ convert.py | 59 +++++++++++++++++++++++++++++++++++++----------------- 2 files changed, 45 insertions(+), 18 deletions(-) diff --git a/.gitignore b/.gitignore index f4876f8..8f97be7 100644 --- a/.gitignore +++ b/.gitignore @@ -8,3 +8,7 @@ # Python cache *.pyc +*.pb +*.pbtxt +.tmp/ +.vscode/ diff --git a/convert.py b/convert.py index 1c005ce..05cf0c4 100755 --- a/convert.py +++ b/convert.py @@ -8,8 +8,10 @@ from kaffe.tensorflow import TensorFlowTransformer import shutil +import pickle import tensorflow as tf from tensorflow.python.tools.freeze_graph import freeze_graph +from tensorflow.python.tools import optimize_for_inference_lib def fatal_error(msg): @@ -26,7 +28,8 @@ def validate_arguments(args): fatal_error('No output path specified.') -def convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path, phase): +def convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path, + phase, freeze): try: sess = tf.InteractiveSession() transformer = TensorFlowTransformer(def_path, caffemodel_path, phase=phase) @@ -34,8 +37,8 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand if data_output_path is not None: data = transformer.transform_data() print_stderr('Saving data...') - with open(data_output_path, 'wb') as data_out: - np.save(data_out, data) + with open(data_output_path, 'wb') as handle: + pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL) if code_output_path is not None: print_stderr('Saving source...') with open(code_output_path, 'wb') as src_out: @@ -44,14 +47,16 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand if standalone_output_path: filename, _ = os.path.splitext(os.path.basename(standalone_output_path)) temp_folder = os.path.join(os.path.dirname(standalone_output_path), '.tmp') - os.makedirs(temp_folder) + if not os.path.exists(temp_folder): + os.makedirs(temp_folder) + shutil.rmtree(temp_folder) # Delete old graphs if data_output_path is None: data = transformer.transform_data() print_stderr('Saving data...') data_output_path = os.path.join(temp_folder, filename) + '.npy' - with open(data_output_path, 'wb') as
data_out: - np.save(data_out, data) + with open(data_output_path, 'wb') as handle: + pickle.dump(data, handle, protocol=pickle.HIGHEST_PROTOCOL) if code_output_path is None: print_stderr('Saving source...') @@ -65,25 +70,29 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand input_node = transformer.graph.nodes[0].name output_node = transformer.graph.nodes[-1].name tensor_shape = transformer.graph.get_node(input_node).output_shape - tensor_shape_list = [tensor_shape.batch_size, tensor_shape.height, tensor_shape.width, tensor_shape.channels] + tensor_shape_list = [tensor_shape.batch_size, tensor_shape.height, + tensor_shape.width, tensor_shape.channels] sys.path.append(os.path.dirname(code_output_path)) module = os.path.splitext(os.path.basename(code_output_path))[0] class_name = transformer.graph.name KaffeNet = getattr(__import__(module), class_name) - data_placeholder = tf.placeholder(tf.float32, tensor_shape_list, name=input_node) + data_placeholder = tf.compat.v1.placeholder( + tf.float32, tensor_shape_list, name=input_node) net = KaffeNet({input_node: data_placeholder}) # load weights stored in numpy format net.load(data_output_path, sess) print_stderr('Saving checkpoint...') - saver = tf.train.Saver() + saver = tf.compat.v1.train.Saver() saver.save(sess, checkpoint_path) print_stderr('Saving graph definition as protobuf...') - tf.train.write_graph(sess.graph.as_graph_def(), graph_folder, graph_name, False) + tf.io.write_graph(sess.graph.as_graph_def(), graph_folder, graph_name, False) + writer = tf.compat.v1.summary.FileWriter('.tmp', sess.graph) + writer.close() input_graph_path = standalone_output_path input_saver_def_path = "" @@ -96,13 +105,23 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand clear_devices = True print_stderr('Saving standalone model...') - freeze_graph(input_graph_path, input_saver_def_path, - input_binary, input_checkpoint_path, - output_node_names, restore_op_name, - filename_tensor_name, output_graph_path, - clear_devices, '') - - shutil.rmtree(temp_folder) + output_node_names = '{0}/{0}'.format(output_node_names) + if freeze == 'freeze_graph': + freeze_graph(input_graph_path, input_saver_def_path, + input_binary, input_checkpoint_path, + output_node_names, restore_op_name, + filename_tensor_name, output_graph_path, + clear_devices, '') + elif freeze == 'optimize_for_inference': + graph_def = sess.graph.as_graph_def() + graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [output_node_names]) + graph_def_f32 = optimize_for_inference_lib.optimize_for_inference(graph_def, ['data'], [output_node_names], tf.float32.as_datatype_enum) + tf.train.write_graph(graph_def_f32, "", 'graph_f32.pb', as_text=False) + tf.train.write_graph(graph_def_f32, "", 'graph_f32.pbtxt', as_text=True) + + #f = shutil.rmtree(temp_folder) + writer = tf.compat.v1.summary.FileWriter('.tmp', sess.graph) + writer.close() print_stderr('Done.') except KaffeError as err: @@ -120,10 +139,14 @@ def main(): '--phase', default='test', help='The phase to convert: test (default) or train') + parser.add_argument('-fz', + '--freeze', + default=None, + help='Freeze option for inference: No (default), freeze_graph or optimize_for_inference (e.g.
for OpenCV)') args = parser.parse_args() validate_arguments(args) convert(args.def_path, args.caffemodel, args.data_output_path, args.code_output_path, - args.standalone_output_path, args.phase) + args.standalone_output_path, args.phase, args.freeze) if __name__ == '__main__': From 8372493906b46146a7c772b5695b4ba7af1b7399 Mon Sep 17 00:00:00 2001 From: Paul Bauriegel Date: Thu, 18 Jul 2019 00:15:47 +0300 Subject: [PATCH 10/11] Enable PReLU working as extra layer --- .gitignore | 1 + kaffe/graph.py | 7 +- kaffe/layers.py | 3 +- kaffe/tensorflow/network.py | 158 +++++++++++++++++++++++--------- kaffe/tensorflow/transformer.py | 16 +++- kaffe/transformers.py | 17 +++- 6 files changed, 154 insertions(+), 48 deletions(-) diff --git a/.gitignore b/.gitignore index 8f97be7..2ba7ff3 100644 --- a/.gitignore +++ b/.gitignore @@ -10,5 +10,6 @@ *.pb *.pbtxt +*.mat .tmp/ .vscode/ diff --git a/kaffe/graph.py b/kaffe/graph.py index bec2b3a..045c4d8 100644 --- a/kaffe/graph.py +++ b/kaffe/graph.py @@ -119,7 +119,12 @@ def __str__(self): for node in self.topologically_sorted(): # If the node has learned parameters, display the first one's shape. # In case of convolutions, this corresponds to the weights. - data_shape = node.data[0].shape if node.data else '--' + if node.data is None: + data_shape = '--' + elif isinstance(node.data, dict): + data_shape = node.data['weights'].shape#'dict({})'.format(node.data.keys()) + else: + data_shape = node.data[0].shape out_shape = node.output_shape or '--' s.append('{:<20} {:<30} {:>20} {:>20}'.format(node.kind, node.name, data_shape, tuple(out_shape))) diff --git a/kaffe/layers.py b/kaffe/layers.py index c3c5955..bdcf26f 100644 --- a/kaffe/layers.py +++ b/kaffe/layers.py @@ -38,6 +38,7 @@ 'Pooling': shape_pool, 'Power': shape_identity, 'ReLU': shape_identity, + 'PReLU': shape_identity, 'Scale': shape_identity, 'Sigmoid': shape_identity, 'SigmoidCrossEntropyLoss': shape_scalar, @@ -81,7 +82,7 @@ class NodeDispatch(object): @staticmethod def get_handler_name(node_kind): - if len(node_kind) <= 4: + if len(node_kind) <= 4 or node_kind == 'PReLU': # A catch-all for things like ReLU and tanh return node_kind.lower() # Convert from CamelCase to under_scored diff --git a/kaffe/tensorflow/network.py b/kaffe/tensorflow/network.py index 3f7f023..1abecce 100644 --- a/kaffe/tensorflow/network.py +++ b/kaffe/tensorflow/network.py @@ -1,6 +1,11 @@ import numpy as np +import pickle import tensorflow as tf +from tensorflow.python.framework import ops +from tensorflow.python.ops import math_ops +from tensorflow.python.ops import nn_ops + DEFAULT_PADDING = 'SAME' @@ -41,9 +46,9 @@ def __init__(self, inputs, trainable=True): # If true, the resulting variables are set as trainable self.trainable = trainable # Switch variable for dropout - self.use_dropout = tf.placeholder_with_default(tf.constant(1.0), - shape=[], - name='use_dropout') + self.use_dropout = tf.compat.v1.placeholder_with_default(tf.constant(1.0), + shape=[], + name='use_dropout') self.setup() def setup(self): @@ -56,16 +61,25 @@ def load(self, data_path, session, ignore_missing=False): session: The current TensorFlow session ignore_missing: If true, serialized weights for missing layers are ignored. 
''' - data_dict = np.load(data_path).item() + with open(data_path, 'rb') as handle: + data_dict = pickle.load(handle) for op_name in data_dict: - with tf.variable_scope(op_name, reuse=True): - for param_name, data in data_dict[op_name].iteritems(): + with tf.compat.v1.variable_scope(op_name, reuse=True): + if 'relu' in op_name: try: - var = tf.get_variable(param_name) - session.run(var.assign(data)) + var = tf.compat.v1.get_variable(op_name) + session.run(var.assign(data_dict[op_name][0])) except ValueError: if not ignore_missing: raise + else: + for param_name, data in data_dict[op_name].iteritems(): + try: + var = tf.compat.v1.get_variable(param_name) + session.run(var.assign(data)) + except ValueError: + if not ignore_missing: + raise def feed(self, *args): '''Set the input(s) for the next operation by replacing the terminal nodes. @@ -95,15 +109,34 @@ def get_unique_name(self, prefix): def make_var(self, name, shape): '''Creates a new TensorFlow variable.''' - return tf.get_variable(name, shape, trainable=self.trainable) + return tf.compat.v1.get_variable(name, shape, trainable=self.trainable) def validate_padding(self, padding): '''Verifies that the padding is one of the supported ones.''' assert padding in ('SAME', 'VALID') + def prelu_layer(self, x, weights, biases, name=None): + """Computes PReLU(x * weights + biases). + Args: + x: a 2D tensor. Dimensions typically: batch, in_units + weights: a 2D tensor. Dimensions typically: in_units, out_units + biases: a 1D tensor. Dimensions: out_units + name: A name for the operation (optional). If not specified, + "prelu_layer" is used. + Returns: + A 2-D Tensor computing prelu(matmul(x, weights) + biases). + Dimensions typically: batch, out_units. + """ + with ops.name_scope(name, "prelu_layer", [x, weights, biases]) as name: + x = ops.convert_to_tensor(x, name="x") + weights = ops.convert_to_tensor(weights, name="weights") + biases = ops.convert_to_tensor(biases, name="biases") + xw_plus_b = nn_ops.bias_add(math_ops.matmul(x, weights), biases) + return self.parametric_relu(xw_plus_b, name=name) @layer def conv(self, - input, + inputs, k_h, k_w, c_o, s_h, s_w, name, relu=True, + prelu=False, padding=DEFAULT_PADDING, group=1, biased=True): # Verify that the padding is acceptable self.validate_padding(padding) # Get the number of channels in the input - c_i = input.get_shape()[-1] + c_i = inputs.get_shape()[-1] # Verify that the grouping parameter is valid assert c_i % group == 0 assert c_o % group == 0 # Convolution for a given input and kernel convolve = lambda i, k: tf.nn.conv2d(i, k, [1, s_h, s_w, 1], padding=padding) with tf.compat.v1.variable_scope(name) as scope: kernel = self.make_var('weights', shape=[k_h, k_w, c_i / group, c_o]) if group == 1: # This is the common-case. Convolve the input without any further complications.
-                output = convolve(input, kernel)
+                output = convolve(inputs, kernel)
             else:
                 # Split the input into groups and then convolve each of them independently
-                input_groups = tf.split(3, group, input)
+                input_groups = tf.split(3, group, inputs)
                 kernel_groups = tf.split(3, group, kernel)
                 output_groups = [convolve(i, k) for i, k in zip(input_groups, kernel_groups)]
                 # Concatenate the groups
@@ -142,33 +176,66 @@ def conv(self,
             if relu:
                 # ReLU non-linearity
                 output = tf.nn.relu(output, name=scope.name)
+            elif prelu:
+                output = self.parametric_relu(output, scope=scope)
             return output

     @layer
-    def relu(self, input, name):
-        return tf.nn.relu(input, name=name)
+    def relu(self, x, name):
+        return tf.nn.relu(x, name=name)
+
+    @layer
+    def prelu(self, x, name):
+        return self.parametric_relu(x, name=name)
+
+    def parametric_relu(self, x, scope=None, name="PReLU"):
+        """PReLU.
+
+        Parametric Rectified Linear Unit. Based on:
+        https://github.com/tflearn/tflearn/blob/5c23566de6e614a36252a5828d107d001a0d0482/tflearn/activations.py#L188
+
+        Arguments:
+            x: A `Tensor` with type `float`, `double`, `int32`, `int64`, `uint8`,
+                `int16`, or `int8`.
+            name: A name for this activation op (optional).
+
+        Returns:
+            A `Tensor` with the same type as `x`.
+        """
+        with tf.compat.v1.variable_scope(scope, default_name=name, values=[x]) as scope:
+            # One learnable slope per channel, as in Caffe's default setting.
+            alphas = self.make_var(name, x.get_shape()[-1])
+            # relu(x) covers the positive half; (x - |x|) / 2 equals x for
+            # x < 0 and 0 otherwise, so this is max(0, x) + alphas * min(0, x).
+            x = tf.nn.relu(x) + tf.multiply(alphas, (x - tf.abs(x))) * 0.5
+            x.scope = scope
+            x.alphas = alphas
+        return x

     @layer
-    def max_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
+    def max_pool(self, x, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
         self.validate_padding(padding)
-        return tf.nn.max_pool(input,
-                              ksize=[1, k_h, k_w, 1],
-                              strides=[1, s_h, s_w, 1],
-                              padding=padding,
-                              name=name)
+        return tf.nn.max_pool2d(x,
+                                ksize=[1, k_h, k_w, 1],
+                                strides=[1, s_h, s_w, 1],
+                                padding=padding,
+                                name=name)

     @layer
-    def avg_pool(self, input, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
+    def avg_pool(self, x, k_h, k_w, s_h, s_w, name, padding=DEFAULT_PADDING):
         self.validate_padding(padding)
-        return tf.nn.avg_pool(input,
+        return tf.nn.avg_pool(x,
                               ksize=[1, k_h, k_w, 1],
                               strides=[1, s_h, s_w, 1],
                               padding=padding,
                               name=name)

     @layer
-    def lrn(self, input, radius, alpha, beta, name, bias=1.0):
-        return tf.nn.local_response_normalization(input,
+    def lrn(self, x, radius, alpha, beta, name, bias=1.0):
+        return tf.nn.local_response_normalization(x,
                                                   depth_radius=radius,
                                                   alpha=alpha,
                                                   beta=beta,
@@ -184,48 +251,53 @@ def add(self, inputs, name):
         return tf.add_n(inputs, name=name)

     @layer
-    def fc(self, input, num_out, name, relu=True):
-        with tf.variable_scope(name) as scope:
-            input_shape = input.get_shape()
+    def fc(self, x, num_out, name, relu=True, prelu=False):
+        with tf.compat.v1.variable_scope(name) as scope:
+            input_shape = x.get_shape()
             if input_shape.ndims == 4:
                 # The input is spatial. Vectorize it first.
                dim = 1
                 for d in input_shape[1:].as_list():
                     dim *= d
-                feed_in = tf.reshape(input, [-1, dim])
+                feed_in = tf.reshape(x, [-1, dim])
             else:
-                feed_in, dim = (input, input_shape[-1].value)
+                feed_in, dim = (x, input_shape[-1].value)
             weights = self.make_var('weights', shape=[dim, num_out])
             biases = self.make_var('biases', [num_out])
-            op = tf.nn.relu_layer if relu else tf.nn.xw_plus_b
+            if relu:
+                op = tf.nn.relu_layer
+            elif prelu:
+                op = self.prelu_layer
+            else:
+                op = tf.nn.xw_plus_b
             fc = op(feed_in, weights, biases, name=scope.name)
             return fc

     @layer
-    def softmax(self, input, name):
-        input_shape = map(lambda v: v.value, input.get_shape())
+    def softmax(self, x, name):
+        input_shape = x.get_shape().as_list()
         if len(input_shape) > 2:
             # For certain models (like NiN), the singleton spatial dimensions
             # need to be explicitly squeezed, since they're not broadcast-able
             # in TensorFlow's NHWC ordering (unlike Caffe's NCHW).
             if input_shape[1] == 1 and input_shape[2] == 1:
-                input = tf.squeeze(input, squeeze_dims=[1, 2])
+                x = tf.squeeze(x, axis=[1, 2])
             else:
                 raise ValueError('Rank 2 tensor input expected for softmax!')
-        return tf.nn.softmax(input, name=name)
+        return tf.nn.softmax(x, name=name)

     @layer
-    def batch_normalization(self, input, name, scale_offset=True, relu=False):
+    def batch_normalization(self, x, name, scale_offset=True, relu=False, prelu=False):
         # NOTE: Currently, only inference is supported
-        with tf.variable_scope(name) as scope:
-            shape = [input.get_shape()[-1]]
+        with tf.compat.v1.variable_scope(name) as scope:
+            shape = [x.get_shape()[-1]]
             if scale_offset:
                 scale = self.make_var('scale', shape=shape)
                 offset = self.make_var('offset', shape=shape)
             else:
                 scale, offset = (None, None)
             output = tf.nn.batch_normalization(
-                input,
+                x,
                 mean=self.make_var('mean', shape=shape),
                 variance=self.make_var('variance', shape=shape),
                 offset=offset,
@@ -236,9 +308,11 @@ def batch_normalization(self, input, name, scale_offset=True, relu=False):
                 name=name)
             if relu:
                 output = tf.nn.relu(output)
+            elif prelu:
+                output = self.parametric_relu(output, name=scope.name)
             return output

     @layer
-    def dropout(self, input, keep_prob, name):
+    def dropout(self, x, keep_prob, name):
         keep = 1 - self.use_dropout + (self.use_dropout * keep_prob)
-        return tf.nn.dropout(input, keep, name=name)
+        return tf.nn.dropout(x, keep, name=name)
diff --git a/kaffe/tensorflow/transformer.py b/kaffe/tensorflow/transformer.py
index 68e37af..2fdb96e 100644
--- a/kaffe/tensorflow/transformer.py
+++ b/kaffe/tensorflow/transformer.py
@@ -3,7 +3,7 @@
 from ..errors import KaffeError, print_stderr
 from ..graph import GraphBuilder, NodeMapper
 from ..layers import NodeKind
-from ..transformers import (DataInjector, DataReshaper, NodeRenamer, ReLUFuser,
+from ..transformers import (DataInjector, DataReshaper, NodeRenamer, ReLUFuser, PReLUFuser,
                             BatchNormScaleBiasFuser, BatchNormPreprocessor, ParameterNamer)
 from . import network

@@ -69,6 +69,8 @@ def __init__(self, node, default=True):
         self.inject_kwargs = {}
         if node.metadata.get('relu', False) != default:
             self.inject_kwargs['relu'] = not default
+        if node.metadata.get('prelu'):
+            self.inject_kwargs['prelu'] = node.metadata.get('prelu')

     def __call__(self, *args, **kwargs):
         kwargs.update(self.inject_kwargs)
@@ -103,6 +105,9 @@ def map_convolution(self, node):

     def map_relu(self, node):
         return TensorFlowNode('relu')
+
+    def map_prelu(self, node):
+        return TensorFlowNode('prelu')

     def map_pooling(self, node):
         pool_type = node.parameters.pool
@@ -229,7 +234,7 @@ def load(self, def_path, data_path, phase):
         if data_path is not None:
             # Load and associate learned parameters
             graph = DataInjector(def_path, data_path)(graph)
-
+
         # Transform the graph
         transformers = [
             # Fuse split batch normalization layers
@@ -240,6 +245,8 @@ def load(self, def_path, data_path, phase):
             # any arbitrary operation to be optionally activated.
             ReLUFuser(allowed_parent_types=[NodeKind.Convolution, NodeKind.InnerProduct,
                                             NodeKind.BatchNorm]),
+            # PReLU is mapped as a separate layer instead of being fused,
+            # so PReLUFuser stays disabled here.
+            #PReLUFuser(allowed_parent_types=[NodeKind.Convolution, NodeKind.InnerProduct,
+            #                                 NodeKind.BatchNorm]),

             # Rename nodes
             # Slashes are used for scoping in TensorFlow. Replace slashes
@@ -263,7 +270,10 @@ def transform_data(self):
                     NodeKind.Convolution: (2, 3, 1, 0),

                     # (c_o, c_i) -> (c_i, c_o)
-                    NodeKind.InnerProduct: (1, 0)
+                    NodeKind.InnerProduct: (1, 0),
+
+                    # One-dimensional (per-channel slopes); nothing to transpose
+                    NodeKind.PReLU: (0,)
                 }),

             # Pre-process batch normalization data
diff --git a/kaffe/transformers.py b/kaffe/transformers.py
index cd8a07d..593b61f 100644
--- a/kaffe/transformers.py
+++ b/kaffe/transformers.py
@@ -51,7 +51,7 @@ def load_using_pb(self):
     def normalize_pb_data(self, layer):
         transformed = []
         for blob in layer.blobs:
-            if len(blob.shape.dim):
+            if blob.shape.dim:
                 dims = blob.shape.dim
                 c_o, c_i, h, w = map(int, [1] * (4 - len(dims)) + list(dims))
             else:
@@ -205,6 +206,20 @@ def is_eligible_pair(self, parent, child):
     def merge(self, parent, _):
         parent.metadata['relu'] = True

+
+class PReLUFuser(SubNodeFuser):
+    '''
+    Fuses parametric rectified linear units with their parent nodes.
+    See ReLUFuser for reference.
+    '''
+
+    def __init__(self, allowed_parent_types=None):
+        self.allowed_parent_types = allowed_parent_types
+
+    def is_eligible_pair(self, parent, child):
+        return ((self.allowed_parent_types is None or
+                 parent.kind in self.allowed_parent_types) and
+                child.kind == NodeKind.PReLU)
+
+    def merge(self, parent, _):
+        parent.metadata['prelu'] = True
+

 class BatchNormScaleBiasFuser(SubNodeFuser):
     '''

From 8d6b430234b04200c60545bafa0e998ff034fc07 Mon Sep 17 00:00:00 2001
From: Paul Bauriegel
Date: Thu, 18 Jul 2019 16:29:35 +0300
Subject: [PATCH 11/11] Fix unhandled Parameter Error

---
 .gitignore                  |  4 ++++
 convert.py                  | 40 ++++++++++++++++++++++---------------
 kaffe/tensorflow/network.py |  3 ++-
 kaffe/transformers.py       | 21 +++++++++++++++++++
 4 files changed, 51 insertions(+), 17 deletions(-)

diff --git a/.gitignore b/.gitignore
index 2ba7ff3..58c271d 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,8 +8,12 @@
 # Python cache
 *.pyc

+# Ignore results
 *.pb
 *.pbtxt
 *.mat
 .tmp/
+
+# Ignore development settings
 .vscode/
+.ipynb_checkpoints/
diff --git a/convert.py b/convert.py
index 05cf0c4..e5fd09d 100755
--- a/convert.py
+++ b/convert.py
@@ -2,10 +2,7 @@

 import os
 import sys
-import numpy as np
 import argparse
-from kaffe import KaffeError, print_stderr
-from kaffe.tensorflow import TensorFlowTransformer

 import shutil
 import pickle
@@ -13,6 +10,9 @@
 from tensorflow.python.tools.freeze_graph import freeze_graph
 from tensorflow.python.tools import optimize_for_inference_lib

+from kaffe import KaffeError, print_stderr
+from kaffe.tensorflow import TensorFlowTransformer
+

 def fatal_error(msg):
     print_stderr(msg)
@@ -22,13 +22,15 @@ def validate_arguments(args):
     if (args.data_output_path is not None) and (args.caffemodel is None):
         fatal_error('No input data path provided.')
-    if (args.caffemodel is not None) and (args.data_output_path is None) and (args.standalone_output_path is None):
+    if (args.caffemodel is not None) and (args.data_output_path is None) and \
+       (args.standalone_output_path is None):
         fatal_error('No output data path provided.')
-    if (args.code_output_path is None) and (args.data_output_path is None) and (args.standalone_output_path is None):
+    if (args.code_output_path is None) and (args.data_output_path is None) and \
+       (args.standalone_output_path is None):
         fatal_error('No output path specified.')

-def convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path,
+def convert(def_path, caffemodel_path, data_output_path, code_output_path, standalone_output_path,
             phase, freeze):
     try:
         sess = tf.InteractiveSession()
@@ -108,16 +110,20 @@ def convert(def_path, caffemodel_path, data_output_path, code_output_path, stand
         output_node_names = '{0}/{0}'.format(output_node_names)
         if freeze == 'freeze_graph':
             freeze_graph(input_graph_path, input_saver_def_path,
-                     input_binary, input_checkpoint_path,
-                     output_node_names, restore_op_name,
-                     filename_tensor_name, output_graph_path,
-                     clear_devices, '')
+                         input_binary, input_checkpoint_path,
+                         output_node_names, restore_op_name,
+                         filename_tensor_name, output_graph_path,
+                         clear_devices, '')
         elif freeze == 'optimize_for_inference':
             graph_def = sess.graph.as_graph_def()
-            graph_def = tf.graph_util.convert_variables_to_constants(sess, graph_def, [output_node_names])
-            graph_def_f32 = optimize_for_inference_lib.optimize_for_inference(graph_def, ['data'], [output_node_names], tf.float32.as_datatype_enum)
-            tf.train.write_graph(graph_def_f32, "", 'graph_f32.pb', as_text=False)
-            tf.train.write_graph(graph_def_f32, "", 'graph_f32.pbtxt', as_text=True)
+            graph_def = tf.graph_util.convert_variables_to_constants(
+                sess, graph_def, [output_node_names])
+            graph_def_f32 = optimize_for_inference_lib.optimize_for_inference(
+                graph_def, ['data'], [output_node_names], tf.float32.as_datatype_enum)
+            tf.train.write_graph(
+                graph_def_f32, "", standalone_output_path.rsplit('.', 1)[0] + '.pb', as_text=False)
+            tf.train.write_graph(
+                graph_def_f32, "", standalone_output_path.rsplit('.', 1)[0] + '.pbtxt', as_text=True)

         # shutil.rmtree(temp_folder)
         writer = tf.compat.v1.summary.FileWriter('.tmp', sess.graph)
@@ -134,7 +140,8 @@ def main():
     parser.add_argument('--caffemodel', help='Model data (.caffemodel) path')
     parser.add_argument('--data-output-path', help='Converted data output path')
     parser.add_argument('--code-output-path', help='Save generated source to this path')
-    parser.add_argument('--standalone-output-path', help='Save generated standalone tensorflow model to this path')
+    parser.add_argument('--standalone-output-path',
+                        help='Save generated standalone TensorFlow model to this path')
     parser.add_argument('-p',
                         '--phase',
                         default='test',
@@ -142,7 +149,8 @@ def main():
     parser.add_argument('-fz',
                         '--freeze',
                         default=None,
-                        help='Freeze option for inference: No (default), freeze_graph or optimize_for_inference(e.g. for OpenCV)')
+                        help="""Freeze option for inference: No (default),
+                        freeze_graph or optimize_for_inference (e.g. for OpenCV)""")
     args = parser.parse_args()
     validate_arguments(args)
     convert(args.def_path, args.caffemodel, args.data_output_path, args.code_output_path,
diff --git a/kaffe/tensorflow/network.py b/kaffe/tensorflow/network.py
index 1abecce..a9c1bf0 100644
--- a/kaffe/tensorflow/network.py
+++ b/kaffe/tensorflow/network.py
@@ -65,6 +65,7 @@ def load(self, data_path, session, ignore_missing=False):
             data_dict = pickle.load(handle)
         for op_name in data_dict:
             with tf.compat.v1.variable_scope(op_name, reuse=True):
+                # TODO: name mapping does not work for PReLU ops (ParameterNamer
+                # skips them), so restore their single weight tensor by op name.
                 if 'relu' in op_name:
                     try:
                         var = tf.compat.v1.get_variable(op_name)
@@ -269,7 +270,7 @@ def fc(self, x, num_out, name, relu=True, prelu=False):
             elif prelu:
                 op = self.prelu_layer
             else:
-                op = tf.nn.xw_plus_b
+                op = tf.compat.v1.nn.xw_plus_b
             fc = op(feed_in, weights, biases, name=scope.name)
             return fc
diff --git a/kaffe/transformers.py b/kaffe/transformers.py
index 593b61f..44c54d4 100644
--- a/kaffe/transformers.py
+++ b/kaffe/transformers.py
@@ -297,6 +297,27 @@ def __call__(self, graph):
                 names = ('mean', 'variance')
                 if len(node.data) == 4:
                     names += ('scale', 'offset')
+            elif node.kind == NodeKind.PReLU:
+                # TODO: PReLUParameter handling (the optional `filler` and
+                # `channel_shared` fields) is still open; a sample Caffe model
+                # with PReLU layers is needed to test it. See
+                # https://caffe.berkeleyvision.org/tutorial/layers/prelu.html
+                # Until then, leave the data list unnamed and skip this node.
+                continue
             else:
                 print_stderr('WARNING: Unhandled parameters: {}'.format(node.kind))
                 continue
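
A note on the PReLU patches: the identity used by `parametric_relu` is easy to sanity-check outside TensorFlow. The sketch below is illustrative only (plain NumPy, with a scalar `alpha` standing in for the per-channel variable the patch creates); it confirms that the expression `relu(x) + alphas * (x - |x|) * 0.5` matches the PReLU definition `max(0, x) + alpha * min(0, x)` from the Caffe docs.

```python
import numpy as np

# For x >= 0: x - |x| = 0, so the second term vanishes and prelu(x) = x.
# For x <  0: x - |x| = 2x, so 0.5 * alpha * (x - |x|) = alpha * x.
x = np.linspace(-3.0, 3.0, 13)
alpha = 0.25  # scalar here; the patch learns one slope per channel

prelu_patch = np.maximum(x, 0.0) + alpha * (x - np.abs(x)) * 0.5
prelu_caffe = np.where(x >= 0.0, x, alpha * x)

assert np.allclose(prelu_patch, prelu_caffe)
```

With both patches applied, the new `--freeze` option selects the post-processing step for the standalone model (`freeze_graph` or `optimize_for_inference`, per the argparse help above). A hypothetical invocation with placeholder paths:

    $ ./convert.py net.prototxt --caffemodel net.caffemodel --standalone-output-path=net.pb --freeze optimize_for_inference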