
/Examples/MobileNets/convert/caffe.proto

https://github.com/hollance/Forge

syntax = "proto2";

package caffe;

// Specifies the shape (dimensions) of a Blob.
message BlobShape {
  repeated int64 dim = 1 [packed = true];
}

message BlobProto {
  optional BlobShape shape = 7;
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];
  repeated double double_data = 8 [packed = true];
  repeated double double_diff = 9 [packed = true];

  // 4D dimensions -- deprecated. Use "shape" instead.
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}

// The BlobProtoVector is simply a way to pass multiple BlobProto instances
// around.
message BlobProtoVector {
  repeated BlobProto blobs = 1;
}

message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // the actual image data, in bytes
  optional bytes data = 4;
  optional int32 label = 5;
  // Optionally, the datum could also hold float data.
  repeated float float_data = 6;
  // If true, data contains an encoded image that needs to be decoded
  optional bool encoded = 7 [default = false];
}

message FillerParameter {
  // The filler type.
  optional string type = 1 [default = 'constant'];
  optional float value = 2 [default = 0]; // the value in constant filler
  optional float min = 3 [default = 0]; // the min value in uniform filler
  optional float max = 4 [default = 1]; // the max value in uniform filler
  optional float mean = 5 [default = 0]; // the mean value in Gaussian filler
  optional float std = 6 [default = 1]; // the std value in Gaussian filler
  // The expected number of non-zero output weights for a given input in
  // Gaussian filler -- the default -1 means don't perform sparsification.
  optional int32 sparse = 7 [default = -1];
  // Normalize the filler variance by fan_in, fan_out, or their average.
  // Applies to 'xavier' and 'msra' fillers.
  enum VarianceNorm {
    FAN_IN = 0;
    FAN_OUT = 1;
    AVERAGE = 2;
  }
  optional VarianceNorm variance_norm = 8 [default = FAN_IN];
}

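// For illustration (not part of the original file) -- a hypothetical prototxt
// snippet showing how fillers typically appear inside a layer's *_param
// message:
//
//   weight_filler { type: "xavier" }
//   bias_filler { type: "constant" value: 0 }
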
message NetParameter {
  optional string name = 1; // consider giving the network a name
  // DEPRECATED. See InputParameter. The input blobs to the network.
  repeated string input = 3;
  // DEPRECATED. See InputParameter. The shape of the input blobs.
  repeated BlobShape input_shape = 8;

  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;

  // Whether the network will force every layer to carry out the backward
  // operation. If set False, then whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6;

  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];

  // The layers that make up the net. Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100; // ID 100 so layers are printed last.

  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}

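// For illustration (not part of the original file) -- a sketch of the
// deprecated in-net input declaration described by the `input` /
// `input_shape` fields above, with a hypothetical shape:
//
//   name: "ExampleNet"
//   input: "data"
//   input_shape { dim: 1 dim: 3 dim: 224 dim: 224 }
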
// NOTE
// Update the next available ID when you add a new SolverParameter field.
//
// SolverParameter next available ID: 41 (last added: type)
message SolverParameter {
  //////////////////////////////////////////////////////////////////////////////
  // Specifying the train and test networks
  //
  // Exactly one train net must be specified using one of the following fields:
  //     train_net_param, train_net, net_param, net
  // One or more test nets may be specified using any of the following fields:
  //     test_net_param, test_net, net_param, net
  // If more than one test net field is specified (e.g., both net and
  // test_net are specified), they will be evaluated in the field order given
  // above: (1) test_net_param, (2) test_net, (3) net_param/net.
  // A test_iter must be specified for each test_net.
  // A test_level and/or a test_stage may also be specified for each test_net.
  //////////////////////////////////////////////////////////////////////////////

  // Proto filename for the train net, possibly combined with one or more
  // test nets.
  optional string net = 24;
  // Inline train net param, possibly combined with one or more test nets.
  optional NetParameter net_param = 25;

  optional string train_net = 1; // Proto filename for the train net.
  repeated string test_net = 2; // Proto filenames for the test nets.
  optional NetParameter train_net_param = 21; // Inline train net params.
  repeated NetParameter test_net_param = 22; // Inline test net params.

  // The states for the train/test nets. Must be unspecified or
  // specified once per net.
  //
  // By default, all states will have solver = true;
  // train_state will have phase = TRAIN,
  // and all test_state's will have phase = TEST.
  // Other defaults are set according to the NetState defaults.
  optional NetState train_state = 26;
  repeated NetState test_state = 27;

  // The number of iterations for each test net.
  repeated int32 test_iter = 3;

  // The number of iterations between two testing phases.
  optional int32 test_interval = 4 [default = 0];
  optional bool test_compute_loss = 19 [default = false];
  // If true, run an initial test pass before the first iteration,
  // ensuring memory availability and printing the starting value of the loss.
  optional bool test_initialization = 32 [default = true];
  optional float base_lr = 5; // The base learning rate
  // the number of iterations between displaying info. If display = 0, no info
  // will be displayed.
  optional int32 display = 6;
  // Display the loss averaged over the last average_loss iterations
  optional int32 average_loss = 33 [default = 1];
  optional int32 max_iter = 7; // the maximum number of iterations
  // accumulate gradients over `iter_size` x `batch_size` instances
  optional int32 iter_size = 36 [default = 1];

  // The learning rate decay policy. The currently implemented learning rate
  // policies are as follows:
  //    - fixed: always return base_lr.
  //    - step: return base_lr * gamma ^ (floor(iter / stepsize))
  //    - exp: return base_lr * gamma ^ iter
  //    - inv: return base_lr * (1 + gamma * iter) ^ (- power)
  //    - multistep: similar to step, but allows non-uniform steps defined by
  //      stepvalue
  //    - poly: the effective learning rate follows a polynomial decay, to be
  //      zero by max_iter: return base_lr * (1 - iter/max_iter) ^ power
  //    - sigmoid: the effective learning rate follows a sigmoid decay:
  //      return base_lr * (1 / (1 + exp(-gamma * (iter - stepsize))))
  //
  // where base_lr, max_iter, gamma, stepsize, stepvalue and power are defined
  // in the solver parameter protocol buffer, and iter is the current iteration.
  optional string lr_policy = 8;
  optional float gamma = 9; // The parameter to compute the learning rate.
  optional float power = 10; // The parameter to compute the learning rate.
  optional float momentum = 11; // The momentum value.
  optional float weight_decay = 12; // The weight decay.
  // regularization types supported: L1 and L2
  // controlled by weight_decay
  optional string regularization_type = 29 [default = "L2"];
  // the stepsize for learning rate policy "step"
  optional int32 stepsize = 13;
  // the stepsize for learning rate policy "multistep"
  repeated int32 stepvalue = 34;

  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
  // whenever their actual L2 norm is larger.
  optional float clip_gradients = 35 [default = -1];

  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
  optional string snapshot_prefix = 15; // The prefix for the snapshot.
  // whether to snapshot diff in the results or not. Snapshotting diff will help
  // debugging but the final protocol buffer size will be much larger.
  optional bool snapshot_diff = 16 [default = false];
  enum SnapshotFormat {
    HDF5 = 0;
    BINARYPROTO = 1;
  }
  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
  // the mode the solver will use: 0 for CPU and 1 for GPU. GPU is used by
  // default.
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU];
  // the device_id that will be used in GPU mode. device_id = 0 is used by
  // default.
  optional int32 device_id = 18 [default = 0];
  // If non-negative, the seed with which the Solver will initialize the Caffe
  // random number generator -- useful for reproducible results. Otherwise,
  // (and by default) initialize using a seed derived from the system clock.
  optional int64 random_seed = 20 [default = -1];

  // type of the solver
  optional string type = 40 [default = "SGD"];

  // numerical stability for RMSProp, AdaGrad, AdaDelta and Adam
  optional float delta = 31 [default = 1e-8];
  // parameters for the Adam solver
  optional float momentum2 = 39 [default = 0.999];

  // RMSProp decay value
  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
  optional float rms_decay = 38 [default = 0.99];

  // If true, print information about the state of the net that may help with
  // debugging learning problems.
  optional bool debug_info = 23 [default = false];

  // If false, don't save a snapshot after training finishes.
  optional bool snapshot_after_train = 28 [default = true];

  // DEPRECATED: old solver enum types, use string instead
  enum SolverType {
    SGD = 0;
    NESTEROV = 1;
    ADAGRAD = 2;
    RMSPROP = 3;
    ADADELTA = 4;
    ADAM = 5;
  }
  // DEPRECATED: use type instead of solver_type
  optional SolverType solver_type = 30 [default = SGD];
}

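// For illustration (not part of the original file) -- a hypothetical
// solver.prototxt using the "step" policy. With base_lr = 0.01, gamma = 0.1
// and stepsize = 10000, the learning rate is 0.01 for iterations 0-9999,
// 0.001 for 10000-19999, and so on:
//
//   net: "train_val.prototxt"
//   base_lr: 0.01
//   lr_policy: "step"
//   gamma: 0.1
//   stepsize: 10000
//   max_iter: 45000
//   snapshot: 5000
//   snapshot_prefix: "snapshots/example"
//   solver_mode: GPU
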
// A message that stores the solver snapshots
message SolverState {
  optional int32 iter = 1; // The current iteration
  optional string learned_net = 2; // The file that stores the learned net.
  repeated BlobProto history = 3; // The history for sgd solvers
  optional int32 current_step = 4 [default = 0]; // The current step for learning rate
}

enum Phase {
  TRAIN = 0;
  TEST = 1;
}

message NetState {
  optional Phase phase = 1 [default = TEST];
  optional int32 level = 2 [default = 0];
  repeated string stage = 3;
}

message NetStateRule {
  // Set phase to require the NetState to have a particular phase (TRAIN or
  // TEST) to meet this rule.
  optional Phase phase = 1;

  // Set the minimum and/or maximum levels in which the layer should be used.
  // Leave undefined to meet the rule regardless of level.
  optional int32 min_level = 2;
  optional int32 max_level = 3;

  // Customizable sets of stages to include or exclude.
  // The net must have ALL of the specified stages and NONE of the specified
  // "not_stage"s to meet the rule.
  // (Use multiple NetStateRules to specify conjunctions of stages.)
  repeated string stage = 4;
  repeated string not_stage = 5;
}

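// For illustration (not part of the original file) -- a hypothetical layer
// included only when the net is in the TRAIN phase:
//
//   layer {
//     name: "data" type: "Data" top: "data" top: "label"
//     include { phase: TRAIN }
//   }
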
// Specifies training parameters (multipliers on global learning constants,
// and the name and other settings used for weight sharing).
message ParamSpec {
  // The names of the parameter blobs -- useful for sharing parameters among
  // layers, but never required otherwise. To share a parameter between two
  // layers, give it a (non-empty) name.
  optional string name = 1;

  // Whether to require shared weights to have the same shape, or just the same
  // count -- defaults to STRICT if unspecified.
  optional DimCheckMode share_mode = 2;
  enum DimCheckMode {
    // STRICT (default) requires that num, channels, height, width each match.
    STRICT = 0;
    // PERMISSIVE requires only the count (num*channels*height*width) to match.
    PERMISSIVE = 1;
  }

  // The multiplier on the global learning rate for this parameter.
  optional float lr_mult = 3 [default = 1.0];
  // The multiplier on the global weight decay for this parameter.
  optional float decay_mult = 4 [default = 1.0];
}

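// For illustration (not part of the original file) -- a common convention
// (the values are only an example): a doubled learning rate and no weight
// decay on the bias blob of a layer with weight and bias parameters:
//
//   param { lr_mult: 1 decay_mult: 1 }  # weights
//   param { lr_mult: 2 decay_mult: 0 }  # bias
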
// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 147 (last added: recurrent_param)
message LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the layer type
  repeated string bottom = 3; // the name of each bottom blob
  repeated string top = 4; // the name of each top blob

  // The train / test phase for computation.
  optional Phase phase = 10;

  // The amount of weight to assign each top blob in the objective.
  // Each layer assigns a default value, usually of either 0 or 1,
  // to each top blob.
  repeated float loss_weight = 5;

  // Specifies training parameters (multipliers on global learning constants,
  // and the name and other settings used for weight sharing).
  repeated ParamSpec param = 6;

  // The blobs containing the numeric parameters of the layer.
  repeated BlobProto blobs = 7;

  // Specifies whether to backpropagate to each bottom. If unspecified,
  // Caffe will automatically infer whether each input needs backpropagation
  // to compute parameter gradients. If set to true for some inputs,
  // backpropagation to those inputs is forced; if set false for some inputs,
  // backpropagation to those inputs is skipped.
  //
  // The size must be either 0 or equal to the number of bottoms.
  repeated bool propagate_down = 11;

  // Rules controlling whether and when a layer is included in the network,
  // based on the current NetState. You may specify a non-zero number of rules
  // to include OR exclude, but not both. If no include or exclude rules are
  // specified, the layer is always included. If the current NetState meets
  // ANY (i.e., one or more) of the specified rules, the layer is
  // included/excluded.
  repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
  //
  // Note: certain layers may have more than one computational engine
  // for their implementation. These layers include an Engine type and
  // engine parameter for selecting the implementation.
  // The default for the engine is set by the ENGINE switch at compile-time.
  optional AccuracyParameter accuracy_param = 102;
  optional ArgMaxParameter argmax_param = 103;
  optional BatchNormParameter batch_norm_param = 139;
  optional BiasParameter bias_param = 141;
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional CropParameter crop_param = 144;
  optional DataParameter data_param = 107;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;
  optional EltwiseParameter eltwise_param = 110;
  optional ELUParameter elu_param = 140;
  optional EmbedParameter embed_param = 137;
  optional ExpParameter exp_param = 111;
  optional FlattenParameter flatten_param = 135;
  optional HDF5DataParameter hdf5_data_param = 112;
  optional HDF5OutputParameter hdf5_output_param = 113;
  optional HingeLossParameter hinge_loss_param = 114;
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InputParameter input_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MemoryDataParameter memory_data_param = 119;
  optional MVNParameter mvn_param = 120;
  optional ParameterParameter parameter_param = 145;
  optional PoolingParameter pooling_param = 121;
  optional PowerParameter power_param = 122;
  optional PReLUParameter prelu_param = 131;
  optional PythonParameter python_param = 130;
  optional RecurrentParameter recurrent_param = 146;
  optional ReductionParameter reduction_param = 136;
  optional ReLUParameter relu_param = 123;
  optional ReshapeParameter reshape_param = 133;
  optional ScaleParameter scale_param = 142;
  optional SigmoidParameter sigmoid_param = 124;
  optional SoftmaxParameter softmax_param = 125;
  optional SPPParameter spp_param = 132;
  optional SliceParameter slice_param = 126;
  optional TanHParameter tanh_param = 127;
  optional ThresholdParameter threshold_param = 128;
  optional TileParameter tile_param = 138;
  optional WindowDataParameter window_data_param = 129;
}

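// For illustration (not part of the original file) -- a hypothetical
// convolution layer in prototxt, tying the generic fields above to a
// type-specific *_param message:
//
//   layer {
//     name: "conv1"
//     type: "Convolution"
//     bottom: "data"
//     top: "conv1"
//     convolution_param { num_output: 32 kernel_size: 3 stride: 2 pad: 1 }
//   }
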
// Message that stores parameters used to apply transformation
// to the data layer's data
message TransformationParameter {
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 1 [default = 1];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 2 [default = false];
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 3 [default = 0];
  // mean_file and mean_value cannot be specified at the same time
  optional string mean_file = 4;
  // if specified, can be repeated once (would subtract it from all the
  // channels) or can be repeated the same number of times as channels
  // (would subtract them from the corresponding channel)
  repeated float mean_value = 5;
  // Force the decoded image to have 3 color channels.
  optional bool force_color = 6 [default = false];
  // Force the decoded image to have 1 color channel.
  optional bool force_gray = 7 [default = false];
}

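// For illustration (not part of the original file) -- hypothetical
// pre-processing values; per the comments above, the per-channel means are
// subtracted before the scale is applied:
//
//   transform_param {
//     mirror: true
//     crop_size: 224
//     mean_value: 104
//     mean_value: 117
//     mean_value: 123
//     scale: 0.017
//   }
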
// Message that stores parameters shared by loss layers
message LossParameter {
  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 1;
  // How to normalize the loss for loss layers that aggregate across batches,
  // spatial dimensions, or other dimensions. Currently only implemented in
  // SoftmaxWithLoss layer.
  enum NormalizationMode {
    // Divide by the number of examples in the batch times spatial dimensions.
    // Outputs that receive the ignore label will NOT be ignored in computing
    // the normalization factor.
    FULL = 0;
    // Divide by the total number of output locations that do not take the
    // ignore_label. If ignore_label is not set, this behaves like FULL.
    VALID = 1;
    // Divide by the batch size.
    BATCH_SIZE = 2;
    // Do not normalize the loss.
    NONE = 3;
  }
  optional NormalizationMode normalization = 3 [default = VALID];
  // Deprecated. Ignored if normalization is specified. If normalization
  // is not specified, then setting this to false will be equivalent to
  // normalization = BATCH_SIZE to be consistent with previous behavior.
  optional bool normalize = 2;
}

// Messages that store parameters used by individual layer types follow, in
// alphabetical order.

message AccuracyParameter {
  // When computing accuracy, count as correct by comparing the true label to
  // the top k scoring classes. By default, only compare to the top scoring
  // class (i.e. argmax).
  optional uint32 top_k = 1 [default = 1];

  // The "label" axis of the prediction blob, whose argmax corresponds to the
  // predicted label -- may be negative to index from the end (e.g., -1 for the
  // last axis). For example, if axis == 1 and the predictions are
  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
  // labels with integer values in {0, 1, ..., C-1}.
  optional int32 axis = 2 [default = 1];

  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 3;
}

message ArgMaxParameter {
  // If true, produce pairs (argmax, maxval)
  optional bool out_max_val = 1 [default = false];
  optional uint32 top_k = 2 [default = 1];
  // The axis along which to maximise -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // By default ArgMaxLayer maximizes over the flattened trailing dimensions
  // for each index of the first / num dimension.
  optional int32 axis = 3;
}

message ConcatParameter {
  // The axis along which to concatenate -- may be negative to index from the
  // end (e.g., -1 for the last axis). Other axes must have the
  // same dimension for all the bottom blobs.
  // By default, ConcatLayer concatenates blobs along the "channels" axis (1).
  optional int32 axis = 2 [default = 1];

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 concat_dim = 1 [default = 1];
}

message BatchNormParameter {
  // If false, accumulate global mean/variance values via a moving average. If
  // true, use those accumulated values instead of computing mean/variance
  // across the batch.
  optional bool use_global_stats = 1;
  // How much does the moving average decay each iteration?
  optional float moving_average_fraction = 2 [default = .999];
  // Small value to add to the variance estimate so that we don't divide by
  // zero.
  optional float eps = 3 [default = 1e-5];
}

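// For illustration (not part of the original file) -- a sketch of the usual
// inference-time setting, where the accumulated statistics are used instead
// of per-batch statistics:
//
//   batch_norm_param { use_global_stats: true }
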
message BiasParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar bias.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the bias
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to add a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer.)
  // The initialization for the learned bias parameter.
  // Default is the zero (0) initialization, resulting in the BiasLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;
}

message ContrastiveLossParameter {
  // margin for dissimilar pair
  optional float margin = 1 [default = 1.0];
  // The first implementation of this cost did not exactly match the cost of
  // Hadsell et al 2006 -- using (margin - d^2) instead of (margin - d)^2.
  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
  // Hadsell paper. New models should probably use this version.
  // legacy_version = true uses (margin - d^2). This is kept to support /
  // reproduce existing models and results.
  optional bool legacy_version = 2 [default = false];
}

message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated uint32 pad = 3; // The padding size; defaults to 0
  repeated uint32 kernel_size = 4; // The kernel size
  repeated uint32 stride = 6; // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // algorithme à trous from Holschneider et al. 1987.)
  repeated uint32 dilation = 18; // The dilation; defaults to 1

  // For 2D convolution only, the *_h and *_w versions may also be used to
  // specify both spatial dimensions.
  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
  optional uint32 kernel_h = 11; // The kernel height (2D only)
  optional uint32 kernel_w = 12; // The kernel width (2D only)
  optional uint32 stride_h = 13; // The stride height (2D only)
  optional uint32 stride_w = 14; // The stride width (2D only)

  optional uint32 group = 5 [default = 1]; // The group size for group conv

  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channel, for
  // groups g>1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channel
  // filters across the spatial axes (D, H, W) of the input.
  optional int32 axis = 16 [default = 1];

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  optional bool force_nd_im2col = 17 [default = false];
}

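// For illustration (not part of the original file) -- a hypothetical
// depthwise-style configuration via `group` (as in MobileNets-like models),
// where group == num_output gives one filter per input channel:
//
//   convolution_param {
//     num_output: 32
//     group: 32
//     kernel_size: 3
//     pad: 1
//     stride: 1
//     weight_filler { type: "msra" }
//   }
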
message CropParameter {
  // To crop, elements of the first bottom are selected to fit the dimensions
  // of the second, reference bottom. The crop is configured by
  // - the crop `axis` to pick the dimensions for cropping
  // - the crop `offset` to set the shift for all/each dimension
  // to align the cropped bottom with the reference bottom.
  // All dimensions up to but excluding `axis` are preserved, while
  // the dimensions including and trailing `axis` are cropped.
  // If only one `offset` is set, then all dimensions are offset by this amount.
  // Otherwise, the number of offsets must equal the number of cropped axes to
  // shift the crop in each dimension accordingly.
  // Note: standard dimensions are N,C,H,W so the default is a spatial crop,
  // and `axis` may be negative to index from the end (e.g., -1 for the last
  // axis).
  optional int32 axis = 1 [default = 2];
  repeated uint32 offset = 2;
}

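// For illustration (not part of the original file) -- with the default
// axis == 2 on an N,C,H,W blob, a hypothetical crop that shifts both H and W
// by 5 while matching the reference bottom's spatial size:
//
//   crop_param { axis: 2 offset: 5 }
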
message DataParameter {
  enum DB {
    LEVELDB = 0;
    LMDB = 1;
  }
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients starting at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  // DEPRECATED. Each solver accesses a different subset of the database.
  optional uint32 rand_skip = 7 [default = 0];
  optional DB backend = 8 [default = LEVELDB];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  // Force the encoded image to have 3 color channels
  optional bool force_encoded_color = 9 [default = false];
  // Prefetch queue (Number of batches to prefetch to host memory, increase if
  // data access bandwidth varies).
  optional uint32 prefetch = 10 [default = 4];
}

message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
}

// DummyDataLayer fills any number of arbitrarily shaped blobs with random
// (or constant) data generated by "Fillers" (see "message FillerParameter").
message DummyDataParameter {
  // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N
  // shape fields, and 0, 1 or N data_fillers.
  //
  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
  // If 1 data_filler is specified, it is applied to all top blobs. If N are
  // specified, the ith is applied to the ith top blob.
  repeated FillerParameter data_filler = 1;
  repeated BlobShape shape = 6;

  // 4D dimensions -- deprecated. Use "shape" instead.
  repeated uint32 num = 2;
  repeated uint32 channels = 3;
  repeated uint32 height = 4;
  repeated uint32 width = 5;
}

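// For illustration (not part of the original file) -- one hypothetical top
// blob filled from a single Gaussian filler:
//
//   dummy_data_param {
//     shape { dim: 1 dim: 3 dim: 8 dim: 8 }
//     data_filler { type: "gaussian" std: 0.1 }
//   }
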
message EltwiseParameter {
  enum EltwiseOp {
    PROD = 0;
    SUM = 1;
    MAX = 2;
  }
  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
  repeated float coeff = 2; // blob-wise coefficient for SUM operation

  // Whether to use an asymptotically slower (for >2 inputs) but more stable
  // method of computing the gradient for the PROD operation. (No effect for
  // SUM op.)
  optional bool stable_prod_grad = 3 [default = true];
}

// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}

// Message that stores parameters used by EmbedLayer
message EmbedParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  // The input is given as integers to be interpreted as one-hot
  // vector indices with dimension num_input. Hence num_input should be
  // 1 greater than the maximum possible input value.
  optional uint32 input_dim = 2;

  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
  optional FillerParameter weight_filler = 4; // The filler for the weight
  optional FillerParameter bias_filler = 5; // The filler for the bias
}

// Message that stores parameters used by ExpLayer
message ExpParameter {
  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = exp(shift + scale * x).
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

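// Worked example (illustrative, not part of the original file): with the
// defaults base = -1 (i.e. e), scale = 1 and shift = 0, y = exp(x);
// with base = 2, scale = 1 and shift = 1, y = 2 ^ (1 + x).
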
/// Message that stores parameters used by FlattenLayer
message FlattenParameter {
  // The first axis to flatten: all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 1 [default = 1];

  // The last axis to flatten: all following axes are retained in the output.
  // May be negative to index from the end (e.g., the default -1 for the last
  // axis).
  optional int32 end_axis = 2 [default = -1];
}

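// Worked example (illustrative, not part of the original file): with the
// defaults axis = 1 and end_axis = -1, an N x C x H x W input flattens to
// shape N x (C*H*W).
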
// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;

  // Specify whether to shuffle the data.
  // If shuffle == true, the ordering of the HDF5 files is shuffled,
  // and the ordering of data within any given HDF5 file is shuffled,
  // but data between different files are not interleaved; all of a file's
  // data are output (in a random order) before moving onto another file.
  optional bool shuffle = 3 [default = false];
}

message HDF5OutputParameter {
  optional string file_name = 1;
}

message HingeLossParameter {
  enum Norm {
    L1 = 1;
    L2 = 2;
  }
  // Specify the Norm to use: L1 or L2
  optional Norm norm = 1 [default = L1];
}

message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4 [default = 1];
  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients starting at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
  // Specify if the images are color or gray
  optional bool is_color = 11 [default = true];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  optional string root_folder = 12 [default = ""];
}

message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
}

message InnerProductParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 3; // The filler for the weight
  optional FillerParameter bias_filler = 4; // The filler for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not going to be
  // transposed but rather the transpose flag of the operations will be toggled
  // accordingly.
  optional bool transpose = 6 [default = false];
}

message InputParameter {
  // This layer produces N >= 1 top blob(s) to be assigned manually.
  // Define N shapes to set a shape for each top.
  // Define 1 shape to set the same shape for every top.
  // Define no shape to defer to reshaping manually.
  repeated BlobShape shape = 1;
}

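// For illustration (not part of the original file) -- a hypothetical Input
// layer for a single-image classifier:
//
//   layer {
//     name: "data" type: "Input" top: "data"
//     input_param { shape { dim: 1 dim: 3 dim: 160 dim: 160 } }
//   }
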
// Message that stores parameters used by LogLayer
message LogParameter {
  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
  optional float k = 5 [default = 1.];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

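// For reference (a sketch of Caffe's documented LRN behavior, not part of the
// original file): in ACROSS_CHANNELS mode each input value is divided by
//   (k + (alpha / local_size) * sum(x_i ^ 2)) ^ beta,
// where the sum runs over the local_size channels centered on the value's
// own channel.
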
message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}

message MVNParameter {
  // This parameter can be set to false to normalize mean only
  optional bool normalize_variance = 1 [default = true];

  // This parameter can be set to true to perform DNN-like MVN
  optional bool across_channels = 2 [default = false];

  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 3 [default = 1e-9];
}

message ParameterParameter {
  optional BlobShape shape = 1;
}

message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 2; // The kernel size (square)
  optional uint32 kernel_h = 5; // The kernel height
  optional uint32 kernel_w = 6; // The kernel width
  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 7; // The stride height
  optional uint32 stride_w = 8; // The stride width
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 11 [default = DEFAULT];
  // If global_pooling is true, pool over the full extent of the bottom by
  // setting kernel_h = bottom->height and kernel_w = bottom->width.
  optional bool global_pooling = 12 [default = false];
}

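// For illustration (not part of the original file) -- hypothetical global
// average pooling, as commonly used at the end of MobileNets-like
// classifiers:
//
//   pooling_param { pool: AVE global_pooling: true }
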
message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

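// Worked example (illustrative, not part of the original file): with
// power = 2, scale = 0.5 and shift = 1, an input x = 4 gives
// y = (1 + 0.5 * 4) ^ 2 = 9.
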
message PythonParameter {
  optional string module = 1;
  optional string layer = 2;
  // This value is set to the attribute `param_str` of the `PythonLayer` object
  // in Python before calling the `setup()` method. This could be a number,
  // string, dictionary in Python dict format, JSON, etc. You may parse this
  // string in the `setup` method and use it in `forward` and `backward`.
  optional string param_str = 3 [default = ''];
  // Whether this PythonLayer is shared among worker solvers during data
  // parallelism. If true, each worker solver sequentially runs forward from
  // this layer. This value should be set to true if you are using it as a
  // data layer.
  optional bool share_in_parallel = 4 [default = false];
}

// Message that stores parameters used by RecurrentLayer
message RecurrentParameter {
  // The dimension of the output (and usually hidden state) representation --
  // must be explicitly set to non-zero.
  optional uint32 num_output = 1 [default = 0];

  optional FillerParameter weight_filler = 2; // The filler for the weight
  optional FillerParameter bias_filler = 3; // The filler for the bias

  // Whether to enable displaying debug_info in the unrolled recurrent net.
  optional bool debug_info = 4 [default = false];

  // Whether to add as additional inputs (bottoms) the initial hidden state
  // blobs, and add as additional outputs (tops) the final timestep hidden state
  // blobs. The number of additional bottom/top blobs required depends on the
  // recurrent architecture -- e.g., 1 for RNNs, 2 for LSTMs.
  optional bool expose_hidden = 5 [default = false];
}

// Message that stores parameters used by ReductionLayer
message ReductionParameter {
  enum ReductionOp {
    SUM = 1;
    ASUM = 2;
    SUMSQ = 3;
    MEAN = 4;
  }

  optional ReductionOp operation = 1 [default = SUM]; // reduction operation

  // The first axis to reduce to a scalar -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // (Currently, only reduction along ALL "tail" axes is supported; reduction
  // of axis M through N, where N < num_axes - 1, is unsupported.)
  // Suppose we have an n-axis bottom Blob with shape:
  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
  // If axis == m, the output Blob will have shape
  //     (d0, d1, d2, ..., d(m-1)),
  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
  // If axis == 0 (the default), the output Blob always has the empty shape
  // (count 1), performing reduction across the entire input --
  // often useful for creating new loss functions.
  optional int32 axis = 2 [default = 0];

  optional float coeff = 3 [default = 1.0]; // coefficient for output
}

// Message that stores parameters used by ReLULayer
message ReLUParameter {
  // Allow non-zero slope for negative inputs to speed up optimization
  // Described in:
  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
  // improve neural network acoustic models. In ICML Workshop on Deep Learning
  // for Audio, Speech, and Language Processing.
  optional float negative_slope = 1 [default = 0];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 2 [default = DEFAULT];
}

message ReshapeParameter {
  // Specify the output dimensions. If some of the dimensions are set to 0,
  // the corresponding dimension from the bottom layer is used (unchanged).
  // Exactly one dimension may be set to -1, in which case its value is
  // inferred from the count of the bottom blob and the remaining dimensions.
  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
  //
  //   layer {
  //     type: "Reshape" bottom: "input" top: "output"
  //     reshape_param { ... }
  //   }
  //
  // If "input" is 2D with shape 2 x 8, then the following reshape_param
  // specifications are all equivalent, producing a 3D blob "output" with shape
  // 2 x 2 x 4:
  //
  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
  //   reshape_param { shape { dim:  0  dim:-1  dim:  4 } }
  //
  optional BlobShape shape = 1;

  // axis and num_axes control the portion of the bottom blob's shape that are
  // replaced by (included in) the reshape. By default (axis == 0 and
  // num_axes == -1), the entire bottom blob shape is included in the reshape,
  // and hence the shape field must specify the entire output shape.
  //
  // axis may be non-zero to retain some portion of the beginning of the input
  // shape (and may be negative to index from the end; e.g., -1 to begin the
  // reshape after the last axis, including nothing in the reshape,
  // -2 to include only the last axis, etc.).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are all equivalent,
  // producing a blob "output" with shape 2 x 2 x 4:
  //
  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
  //
  // num_axes specifies the extent of the reshape.
  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
  // input axes in the range [axis, axis+num_axes].
  // num_axes may also be -1, the default, to include all remaining axes
  // (starting from axis).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are equivalent,
  // producing a blob "output" with shape 1 x 2 x 8:
  //
  //   reshape_param { shape { dim: 1  dim: 2  dim: 8 } }
  //   reshape_param { shape { dim: 1  dim: 2 } num_axes: 1 }
  //   reshape_param { shape { dim: 1 } num_axes: 0 }
  //
  // On the other hand, these would produce output blob shape 2 x 1 x 8:
  //
  //   reshape_param { shape { dim: 2  dim: 1  dim: 8 } }
  //   reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 }
  //
  optional int32 axis = 2 [default = 0];
  optional int32 num_axes = 3 [default = -1];
}

message ScaleParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar multiplier.
  optional int32 axis = 1 [default = 1];

  // (num_axes is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the scale
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];

  // (filler is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.)
  // The initialization for the learned scale parameter.
  // Default is the unit (1) initialization, resulting in the ScaleLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;

  // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but
  // may be more efficient). Initialized with bias_filler (defaults to 0).
  optional bool bias_term = 4 [default = false];
  optional FillerParameter bias_filler = 5;
}

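// For illustration (not part of the original file) -- a sketch of the common
// BatchNorm + Scale pairing in Caffe models (layer names are hypothetical):
// a per-channel learned scale and bias applied after normalization:
//
//   layer {
//     name: "conv1/scale" type: "Scale"
//     bottom: "conv1/bn" top: "conv1/bn"
//     scale_param { bias_term: true }
//   }
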
message SigmoidParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

message SliceParameter {
  // The axis along which to slice -- may be negative to index from the end
  // (e.g., -1 for the last axis).
  // By default, SliceLayer slices blobs along the "channels" axis (1).
  optional int32 axis = 3 [default = 1];
  repeated uint32 slice_point = 2;

  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 slice_dim = 1 [default = 1];
}

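// Worked example (illustrative, not part of the original file): slicing a
// 1 x 6 x H x W blob along the default channel axis with slice_point: 2
// yields two tops with shapes 1 x 2 x H x W and 1 x 4 x H x W.
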
// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}

message TanHParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

// Message that stores parameters used by TileLayer
message TileParameter {
  // The index of the axis to tile.
  optional int32 axis = 1 [default = 1];

  // The number of copies (tiles) of the blob to output.
  optional int32 tiles = 2;
}

// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  optional float threshold = 1 [default = 0]; // Strictly positive values
}

message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
  // cache_images: will load all images in memory for faster access
  optional bool cache_images = 12 [default = false];
  // append root_folder to locate images
  optional string root_folder = 13 [default = ""];
}

message SPPParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional uint32 pyramid_height = 1;
  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

// DEPRECATED: use LayerParameter.
message V1LayerParameter {
  repeated string bottom = 2;
  repeated string top = 3;
  optional string name = 4;
  repeated NetStateRule include = 32;
  repeated NetStateRule exclude = 33;
  enum LayerType {
    NONE = 0;
    ABSVAL = 35;
    ACCURACY = 1;
    ARGMAX = 30;
    BNLL = 2;
    CONCAT = 3;
    CONTRASTIVE_LOSS = 37;
    CONVOLUTION = 4;
    DATA = 5;
    DECONVOLUTION = 39;
    DROPOUT = 6;
    DUMMY_DATA = 32;
    EUCLIDEAN_LOSS = 7;
    ELTWISE = 25;
    EXP = 38;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    MVN = 34;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SILENCE = 36;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    SLICE = 33;
    TANH = 23;
    WINDOW_DATA = 24;
    THRESHOLD = 31;
  }
  optional LayerType type = 5;
  repeated BlobProto blobs = 6;
  repeated string param = 1001;
  repeated DimCheckMode blob_share_mode = 1002;
  enum DimCheckMode {
    STRICT = 0;
    PERMISSIVE = 1;
  }
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8;
  repeated float loss_weight = 35;
  optional AccuracyParameter accuracy_param = 27;
  optional ArgMaxParameter argmax_param = 23;
  optional ConcatParameter concat_param = 9;
  optional ContrastiveLossParameter contrastive_loss_param = 40;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional DummyDataParameter dummy_data_param = 26;
  optional EltwiseParameter eltwise_param = 24;
  optional ExpParameter exp_param = 41;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional HingeLossParameter hinge_loss_param = 29;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional MVNParameter mvn_param = 34;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional ReLUParameter relu_param = 30;
  optional SigmoidParameter sigmoid_param = 38;
  optional SoftmaxParameter softmax_param = 39;
  optional SliceParameter slice_param = 31;
  optional TanHParameter tanh_param = 37;
  optional ThresholdParameter threshold_param = 25;
  optional WindowDataParameter window_data_param = 20;
  optional TransformationParameter transform_param = 36;
  optional LossParameter loss_param = 42;
  optional V0LayerParameter layer = 1;
}

// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
// in Caffe. We keep this message type around for legacy support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type

  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias

  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio

  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  optional float k = 22 [default = 1.];

  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];

  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you want to
  // set the learning ratio for one blob, you need to set it for all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;

  // The rand_skip variable is for the data layer to skip a few data points
  // to avoid all asynchronous sgd clients starting at the same point. The skip
  // point would be set as rand_skip * rand(0,1). Note that rand_skip should not
  // be larger than the number of keys in the database.
  optional uint32 rand_skip = 53 [default = 0];

  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];

  // optional bool OBSOLETE_can_clobber = 57 [default = true];

  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];

  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];

  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];

  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  // It will also resize images if new_height or new_width are not zero.
  optional bool shuffle_images = 64 [default = false];

  // For ConcatLayer, one needs to specify the dimension for concatenation, and
  // the other dimensions must be the same for all the bottom blobs.
  // By default it will concatenate blobs along the channels dimension.
  optional uint32 concat_dim = 65 [default = 1];

  optional HDF5OutputParameter hdf5_output_param = 1001;
}

message PReLUParameter {
  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
  // Surpassing Human-Level Performance on ImageNet Classification, 2015.

  // Initial value of a_i. Default is a_i=0.25 for all i.
  optional FillerParameter filler = 1;
  // Whether or not slope parameters are shared across channels.
  optional bool channel_shared = 2 [default = false];
}