
/libs/caffe.proto

https://github.com/kazuto1011/deeplab-pytorch
Protocol Buffers | 1458 lines | 764 code | 151 blank | 543 comment
Possible License(s): MIT
syntax = "proto2";

package caffe;

// Specifies the shape (dimensions) of a Blob.
message BlobShape {
  repeated int64 dim = 1 [packed = true];
}
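
// Example (prototxt, illustrative): a 4-D shape for a batch of one 3-channel
// 224x224 image:
//
//   shape { dim: 1 dim: 3 dim: 224 dim: 224 }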

message BlobProto {
  optional BlobShape shape = 7;
  repeated float data = 5 [packed = true];
  repeated float diff = 6 [packed = true];
  repeated double double_data = 8 [packed = true];
  repeated double double_diff = 9 [packed = true];

  // 4D dimensions -- deprecated. Use "shape" instead.
  optional int32 num = 1 [default = 0];
  optional int32 channels = 2 [default = 0];
  optional int32 height = 3 [default = 0];
  optional int32 width = 4 [default = 0];
}

// The BlobProtoVector is simply a way to pass multiple blobproto instances
// around.
message BlobProtoVector {
  repeated BlobProto blobs = 1;
}

message Datum {
  optional int32 channels = 1;
  optional int32 height = 2;
  optional int32 width = 3;
  // the actual image data, in bytes
  optional bytes data = 4;
  optional int32 label = 5;
  // Optionally, the datum could also hold float data.
  repeated float float_data = 6;
  // If true, data contains an encoded image that needs to be decoded
  optional bool encoded = 7 [default = false];
}

message FillerParameter {
  // The filler type.
  optional string type = 1 [default = 'constant'];
  optional float value = 2 [default = 0]; // the value in constant filler
  optional float min = 3 [default = 0]; // the min value in uniform filler
  optional float max = 4 [default = 1]; // the max value in uniform filler
  optional float mean = 5 [default = 0]; // the mean value in Gaussian filler
  optional float std = 6 [default = 1]; // the std value in Gaussian filler
  // The expected number of non-zero output weights for a given input in
  // Gaussian filler -- the default -1 means don't perform sparsification.
  optional int32 sparse = 7 [default = -1];
  // Normalize the filler variance by fan_in, fan_out, or their average.
  // Applies to 'xavier' and 'msra' fillers.
  enum VarianceNorm {
    FAN_IN = 0;
    FAN_OUT = 1;
    AVERAGE = 2;
  }
  optional VarianceNorm variance_norm = 8 [default = FAN_IN];
}
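
// Example (prototxt, illustrative): fillers as they might appear inside a
// layer definition -- a Gaussian weight filler and a constant bias filler:
//
//   weight_filler { type: "gaussian" std: 0.01 }
//   bias_filler { type: "constant" value: 0 }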

message NetParameter {
  optional string name = 1; // consider giving the network a name
  // The input blobs to the network.
  repeated string input = 3;
  // The shape of the input blobs.
  repeated BlobShape input_shape = 8;
  // 4D input dimensions -- deprecated. Use "input_shape" instead.
  // If specified, for each input blob there should be four
  // values specifying the num, channels, height and width of the input blob.
  // Thus, there should be a total of (4 * #input) numbers.
  repeated int32 input_dim = 4;
  // Whether the network will force every layer to carry out backward operation.
  // If set to false, whether to carry out backward is determined
  // automatically according to the net structure and learning rates.
  optional bool force_backward = 5 [default = false];
  // The current "state" of the network, including the phase, level, and stage.
  // Some layers may be included/excluded depending on this state and the states
  // specified in the layers' include and exclude fields.
  optional NetState state = 6;
  // Print debugging information about results while running Net::Forward,
  // Net::Backward, and Net::Update.
  optional bool debug_info = 7 [default = false];
  // The layers that make up the net. Each of their configurations, including
  // connectivity and behavior, is specified as a LayerParameter.
  repeated LayerParameter layer = 100; // ID 100 so layers are printed last.
  // DEPRECATED: use 'layer' instead.
  repeated V1LayerParameter layers = 2;
}
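
// Example (prototxt, illustrative): a deploy-style net header that declares
// one input blob via the fields above:
//
//   name: "example_net"
//   input: "data"
//   input_shape { dim: 1 dim: 3 dim: 224 dim: 224 }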

// NOTE
// Update the next available ID when you add a new SolverParameter field.
//
// SolverParameter next available ID: 41 (last added: type)
message SolverParameter {
  //////////////////////////////////////////////////////////////////////////////
  // Specifying the train and test networks
  //
  // Exactly one train net must be specified using one of the following fields:
  //     train_net_param, train_net, net_param, net
  // One or more test nets may be specified using any of the following fields:
  //     test_net_param, test_net, net_param, net
  // If more than one test net field is specified (e.g., both net and
  // test_net are specified), they will be evaluated in the field order given
  // above: (1) test_net_param, (2) test_net, (3) net_param/net.
  // A test_iter must be specified for each test_net.
  // A test_level and/or a test_stage may also be specified for each test_net.
  //////////////////////////////////////////////////////////////////////////////

  // Proto filename for the train net, possibly combined with one or more
  // test nets.
  optional string net = 24;
  // Inline train net param, possibly combined with one or more test nets.
  optional NetParameter net_param = 25;
  optional string train_net = 1; // Proto filename for the train net.
  repeated string test_net = 2; // Proto filenames for the test nets.
  optional NetParameter train_net_param = 21; // Inline train net params.
  repeated NetParameter test_net_param = 22; // Inline test net params.
  // The states for the train/test nets. Must be unspecified or
  // specified once per net.
  //
  // By default, all states will have solver = true;
  // train_state will have phase = TRAIN,
  // and all test_state's will have phase = TEST.
  // Other defaults are set according to the NetState defaults.
  optional NetState train_state = 26;
  repeated NetState test_state = 27;
  // The number of iterations for each test net.
  repeated int32 test_iter = 3;
  // The number of iterations between two testing phases.
  optional int32 test_interval = 4 [default = 0];
  optional bool test_compute_loss = 19 [default = false];
  // If true, run an initial test pass before the first iteration,
  // ensuring memory availability and printing the starting value of the loss.
  optional bool test_initialization = 32 [default = true];
  optional float base_lr = 5; // The base learning rate
  // The number of iterations between displaying info. If display = 0, no info
  // will be displayed.
  optional int32 display = 6;
  // Display the loss averaged over the last average_loss iterations
  optional int32 average_loss = 33 [default = 1];
  optional int32 max_iter = 7; // the maximum number of iterations
  // accumulate gradients over `iter_size` x `batch_size` instances
  optional int32 iter_size = 36 [default = 1];
  // The learning rate decay policy. The currently implemented learning rate
  // policies are as follows:
  //   - fixed: always return base_lr.
  //   - step: return base_lr * gamma ^ (floor(iter / stepsize))
  //   - exp: return base_lr * gamma ^ iter
  //   - inv: return base_lr * (1 + gamma * iter) ^ (-power)
  //   - multistep: similar to step, but allows non-uniform steps defined by
  //     stepvalue
  //   - poly: the effective learning rate follows a polynomial decay, to be
  //     zero by max_iter: return base_lr * (1 - iter/max_iter) ^ power
  //   - sigmoid: the effective learning rate follows a sigmoid decay:
  //     return base_lr * (1 / (1 + exp(-gamma * (iter - stepsize))))
  //
  // where base_lr, max_iter, gamma, stepsize, stepvalue and power are defined
  // in the solver parameter protocol buffer, and iter is the current iteration.
  optional string lr_policy = 8;
  optional float gamma = 9; // The parameter to compute the learning rate.
  optional float power = 10; // The parameter to compute the learning rate.
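
  // Example (solver prototxt, illustrative values): a "step" policy that
  // divides the learning rate by 10 every 20000 iterations:
  //
  //   base_lr: 0.01
  //   lr_policy: "step"
  //   gamma: 0.1
  //   stepsize: 20000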
  optional float momentum = 11; // The momentum value.
  optional float weight_decay = 12; // The weight decay.
  // regularization types supported: L1 and L2
  // controlled by weight_decay
  optional string regularization_type = 29 [default = "L2"];
  // the stepsize for learning rate policy "step"
  optional int32 stepsize = 13;
  // the stepvalues for learning rate policy "multistep"
  repeated int32 stepvalue = 34;
  // Set clip_gradients to >= 0 to clip parameter gradients to that L2 norm,
  // whenever their actual L2 norm is larger.
  optional float clip_gradients = 35 [default = -1];
  optional int32 snapshot = 14 [default = 0]; // The snapshot interval
  optional string snapshot_prefix = 15; // The prefix for the snapshot.
  // whether to snapshot diff in the results or not. Snapshotting diff will help
  // debugging but the final protocol buffer size will be much larger.
  optional bool snapshot_diff = 16 [default = false];
  enum SnapshotFormat {
    HDF5 = 0;
    BINARYPROTO = 1;
  }
  optional SnapshotFormat snapshot_format = 37 [default = BINARYPROTO];
  // the mode the solver will use: 0 for CPU and 1 for GPU. GPU is the default.
  enum SolverMode {
    CPU = 0;
    GPU = 1;
  }
  optional SolverMode solver_mode = 17 [default = GPU];
  // the device_id that will be used in GPU mode. Defaults to device_id = 0.
  optional int32 device_id = 18 [default = 0];
  // If non-negative, the seed with which the Solver will initialize the Caffe
  // random number generator -- useful for reproducible results. Otherwise,
  // (and by default) initialize using a seed derived from the system clock.
  optional int64 random_seed = 20 [default = -1];
  // type of the solver
  optional string type = 40 [default = "SGD"];
  // numerical stability for RMSProp, AdaGrad, AdaDelta and Adam
  optional float delta = 31 [default = 1e-8];
  // parameters for the Adam solver
  optional float momentum2 = 39 [default = 0.999];
  // RMSProp decay value
  // MeanSquare(t) = rms_decay*MeanSquare(t-1) + (1-rms_decay)*SquareGradient(t)
  optional float rms_decay = 38;
  // If true, print information about the state of the net that may help with
  // debugging learning problems.
  optional bool debug_info = 23 [default = false];
  // If false, don't save a snapshot after training finishes.
  optional bool snapshot_after_train = 28 [default = true];
  // DEPRECATED: old solver enum types, use string instead
  enum SolverType {
    SGD = 0;
    NESTEROV = 1;
    ADAGRAD = 2;
    RMSPROP = 3;
    ADADELTA = 4;
    ADAM = 5;
  }
  // DEPRECATED: use type instead of solver_type
  optional SolverType solver_type = 30 [default = SGD];
}

// A message that stores the solver snapshots
message SolverState {
  optional int32 iter = 1; // The current iteration
  optional string learned_net = 2; // The file that stores the learned net.
  repeated BlobProto history = 3; // The history for SGD solvers
  optional int32 current_step = 4 [default = 0]; // The current step for learning rate
}

enum Phase {
  TRAIN = 0;
  TEST = 1;
}

message NetState {
  optional Phase phase = 1 [default = TEST];
  optional int32 level = 2 [default = 0];
  repeated string stage = 3;
}

message NetStateRule {
  // Set phase to require the NetState have a particular phase (TRAIN or TEST)
  // to meet this rule.
  optional Phase phase = 1;
  // Set the minimum and/or maximum levels in which the layer should be used.
  // Leave undefined to meet the rule regardless of level.
  optional int32 min_level = 2;
  optional int32 max_level = 3;
  // Customizable sets of stages to include or exclude.
  // The net must have ALL of the specified stages and NONE of the specified
  // "not_stage"s to meet the rule.
  // (Use multiple NetStateRules to specify conjunctions of stages.)
  repeated string stage = 4;
  repeated string not_stage = 5;
}
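
// Example (prototxt, illustrative): a rule, placed inside a layer definition,
// that admits the layer only during training:
//
//   include { phase: TRAIN }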

// Specifies training parameters (multipliers on global learning constants,
// and the name and other settings used for weight sharing).
message ParamSpec {
  // The names of the parameter blobs -- useful for sharing parameters among
  // layers, but never required otherwise. To share a parameter between two
  // layers, give it a (non-empty) name.
  optional string name = 1;
  // Whether to require shared weights to have the same shape, or just the same
  // count -- defaults to STRICT if unspecified.
  optional DimCheckMode share_mode = 2;
  enum DimCheckMode {
    // STRICT (default) requires that num, channels, height, width each match.
    STRICT = 0;
    // PERMISSIVE requires only the count (num*channels*height*width) to match.
    PERMISSIVE = 1;
  }
  // The multiplier on the global learning rate for this parameter.
  optional float lr_mult = 3 [default = 1.0];
  // The multiplier on the global weight decay for this parameter.
  optional float decay_mult = 4 [default = 1.0];
}
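
// Example (prototxt, illustrative): the common convention of a full-rate,
// decayed weight blob and a double-rate, undecayed bias blob:
//
//   param { lr_mult: 1 decay_mult: 1 }
//   param { lr_mult: 2 decay_mult: 0 }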

// NOTE
// Update the next available ID when you add a new LayerParameter field.
//
// LayerParameter next available layer-specific ID: 152 (last added: mat_read_param)
message LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the layer type
  repeated string bottom = 3; // the name of each bottom blob
  repeated string top = 4; // the name of each top blob

  // The train / test phase for computation.
  optional Phase phase = 10;

  // The amount of weight to assign each top blob in the objective.
  // Each layer assigns a default value, usually of either 0 or 1,
  // to each top blob.
  repeated float loss_weight = 5;

  // Specifies training parameters (multipliers on global learning constants,
  // and the name and other settings used for weight sharing).
  repeated ParamSpec param = 6;

  // The blobs containing the numeric parameters of the layer.
  repeated BlobProto blobs = 7;

  // Specifies on which bottoms the backpropagation should be skipped.
  // The size must be either 0 or equal to the number of bottoms.
  repeated bool propagate_down = 11;

  // Rules controlling whether and when a layer is included in the network,
  // based on the current NetState. You may specify a non-zero number of rules
  // to include OR exclude, but not both. If no include or exclude rules are
  // specified, the layer is always included. If the current NetState meets
  // ANY (i.e., one or more) of the specified rules, the layer is
  // included/excluded.
  repeated NetStateRule include = 8;
  repeated NetStateRule exclude = 9;

  // Parameters for data pre-processing.
  optional TransformationParameter transform_param = 100;

  // Parameters shared by loss layers.
  optional LossParameter loss_param = 101;

  // Layer type-specific parameters.
  //
  // Note: certain layers may have more than one computational engine
  // for their implementation. These layers include an Engine type and
  // engine parameter for selecting the implementation.
  // The default for the engine is set by the ENGINE switch at compile-time.
  optional AccuracyParameter accuracy_param = 102;
  optional AdaptiveBiasChannelParameter adaptive_bias_channel_param = 148;
  optional ArgMaxParameter argmax_param = 103;
  optional BatchNormParameter batch_norm_param = 139;
  optional BiasParameter bias_param = 141;
  optional BiasChannelParameter bias_channel_param = 149;
  optional ConcatParameter concat_param = 104;
  optional ContrastiveLossParameter contrastive_loss_param = 105;
  optional ConvolutionParameter convolution_param = 106;
  optional DataParameter data_param = 107;
  optional DenseCRFParameter dense_crf_param = 146;
  optional DomainTransformParameter domain_transform_param = 147;
  optional DropoutParameter dropout_param = 108;
  optional DummyDataParameter dummy_data_param = 109;
  optional EltwiseParameter eltwise_param = 110;
  optional ELUParameter elu_param = 140;
  optional EmbedParameter embed_param = 137;
  optional ExpParameter exp_param = 111;
  optional FlattenParameter flatten_param = 135;
  optional HDF5DataParameter hdf5_data_param = 112;
  optional HDF5OutputParameter hdf5_output_param = 113;
  optional HingeLossParameter hinge_loss_param = 114;
  optional ImageDataParameter image_data_param = 115;
  optional InfogainLossParameter infogain_loss_param = 116;
  optional InnerProductParameter inner_product_param = 117;
  optional InterpParameter interp_param = 143;
  optional LogParameter log_param = 134;
  optional LRNParameter lrn_param = 118;
  optional MatReadParameter mat_read_param = 151;
  optional MatWriteParameter mat_write_param = 145;
  optional MemoryDataParameter memory_data_param = 119;
  optional MVNParameter mvn_param = 120;
  optional PoolingParameter pooling_param = 121;
  optional PowerParameter power_param = 122;
  optional PReLUParameter prelu_param = 131;
  optional PythonParameter python_param = 130;
  optional ReductionParameter reduction_param = 136;
  optional ReLUParameter relu_param = 123;
  optional ReshapeParameter reshape_param = 133;
  optional ScaleParameter scale_param = 142;
  optional SegAccuracyParameter seg_accuracy_param = 144;
  optional SigmoidParameter sigmoid_param = 124;
  optional SoftmaxParameter softmax_param = 125;
  optional SPPParameter spp_param = 132;
  optional SliceParameter slice_param = 126;
  optional TanHParameter tanh_param = 127;
  optional ThresholdParameter threshold_param = 128;
  optional TileParameter tile_param = 138;
  optional UniqueLabelParameter unique_label_param = 150;
  optional WindowDataParameter window_data_param = 129;
}
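
// Example (prototxt, illustrative): a minimal convolution layer tying the
// generic fields above to a type-specific parameter message:
//
//   layer {
//     name: "conv1"
//     type: "Convolution"
//     bottom: "data"
//     top: "conv1"
//     param { lr_mult: 1 }
//     convolution_param { num_output: 64 kernel_size: 3 pad: 1 }
//   }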

// Message that stores parameters used to apply transformation
// to the data layer's data
message TransformationParameter {
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 1 [default = 1];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 2 [default = false];
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 3 [default = 0];
  // mean_file and mean_value cannot be specified at the same time
  optional string mean_file = 4;
  // if specified, can be repeated once (would subtract it from all the channels)
  // or can be repeated the same number of times as channels
  // (would subtract them from the corresponding channel)
  repeated float mean_value = 5;
  // Force the decoded image to have 3 color channels.
  optional bool force_color = 6 [default = false];
  // Force the decoded image to have 1 color channel.
  optional bool force_gray = 7 [default = false];
  // Scaling factors for randomly scaling input images (data augmentation).
  repeated float scale_factors = 8;
  // the width of the cropped region
  optional uint32 crop_width = 9 [default = 0];
  // the height of the cropped region
  optional uint32 crop_height = 10 [default = 0];
}
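
// Example (prototxt, illustrative values): per-channel mean subtraction
// followed by a random crop and mirror:
//
//   transform_param {
//     mean_value: 104.0 mean_value: 117.0 mean_value: 123.0
//     crop_size: 321
//     mirror: true
//   }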

// Message that stores parameters shared by loss layers
message LossParameter {
  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 1;
  // How to normalize the loss for loss layers that aggregate across batches,
  // spatial dimensions, or other dimensions. Currently only implemented in
  // SoftmaxWithLoss layer.
  enum NormalizationMode {
    // Divide by the number of examples in the batch times spatial dimensions.
    // Outputs that receive the ignore label will NOT be ignored in computing
    // the normalization factor.
    FULL = 0;
    // Divide by the total number of output locations that do not take the
    // ignore_label. If ignore_label is not set, this behaves like FULL.
    VALID = 1;
    // Divide by the batch size.
    BATCH_SIZE = 2;
    // Do not normalize the loss.
    NONE = 3;
  }
  optional NormalizationMode normalization = 3 [default = VALID];
  // Deprecated. Ignored if normalization is specified. If normalization
  // is not specified, then setting this to false will be equivalent to
  // normalization = BATCH_SIZE to be consistent with previous behavior.
  optional bool normalize = 2;
}
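
// Example (prototxt, illustrative): ignore void pixels (label 255, the
// ignore_label default used by ImageDataParameter below) and normalize over
// the remaining valid outputs:
//
//   loss_param { ignore_label: 255 normalization: VALID }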

// Messages that store parameters used by individual layer types follow, in
// alphabetical order.

message AccuracyParameter {
  // When computing accuracy, count as correct by comparing the true label to
  // the top k scoring classes. By default, only compare to the top scoring
  // class (i.e. argmax).
  optional uint32 top_k = 1 [default = 1];
  // The "label" axis of the prediction blob, whose argmax corresponds to the
  // predicted label -- may be negative to index from the end (e.g., -1 for the
  // last axis). For example, if axis == 1 and the predictions are
  // (N x C x H x W), the label blob is expected to contain N*H*W ground truth
  // labels with integer values in {0, 1, ..., C-1}.
  optional int32 axis = 2 [default = 1];
  // If specified, ignore instances with the given label.
  optional int32 ignore_label = 3;
}

message AdaptiveBiasChannelParameter {
  optional int32 num_iter = 1 [default = 1];
  optional float bg_portion = 2 [default = 0.2];
  optional float fg_portion = 3 [default = 0.2];
  optional bool suppress_others = 4 [default = true];
  optional float margin_others = 5 [default = 1e-5];
}

message ArgMaxParameter {
  // If true, produce pairs (argmax, maxval)
  optional bool out_max_val = 1 [default = false];
  optional uint32 top_k = 2 [default = 1];
  // The axis along which to maximize -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // By default ArgMaxLayer maximizes over the flattened trailing dimensions
  // for each index of the first / num dimension.
  optional int32 axis = 3;
}

message BiasChannelParameter {
  // Score biases. Separate values for BG / FG.
  optional float bg_bias = 1 [default = 1.];
  optional float fg_bias = 2 [default = 2.];
  // will ignore labels with this value when adding bias
  repeated int32 ignore_label = 3;
  enum LabelType {
    IMAGE = 1;
    PIXEL = 2;
  }
  optional LabelType label_type = 4 [default = IMAGE];
  // Whether the dataset defines a generic background label.
  // The default value is defined for PASCAL VOC segmentation.
  optional int32 background_label = 6 [default = 0];
}

message ConcatParameter {
  // The axis along which to concatenate -- may be negative to index from the
  // end (e.g., -1 for the last axis). Other axes must have the
  // same dimension for all the bottom blobs.
  // By default, ConcatLayer concatenates blobs along the "channels" axis (1).
  optional int32 axis = 2 [default = 1];
  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 concat_dim = 1 [default = 1];
}

message BatchNormParameter {
  // If false, accumulate global mean/variance values via a moving average. If
  // true, use those accumulated values instead of computing mean/variance
  // across the batch.
  optional bool use_global_stats = 1;
  // How much does the moving average decay each iteration?
  optional float moving_average_fraction = 2 [default = .999];
  // Small value to add to the variance estimate so that we don't divide by
  // zero.
  optional float eps = 3 [default = 1e-5];
  optional bool update_global_stats = 4 [default = false];
}

message BiasParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar bias.
  optional int32 axis = 1 [default = 1];
  // (num_axes is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the bias
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to add a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];
  // (filler is ignored unless just one bottom is given and the bias is
  // a learned parameter of the layer.)
  // The initialization for the learned bias parameter.
  // Default is the zero (0) initialization, resulting in the BiasLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;
}

message ContrastiveLossParameter {
  // margin for dissimilar pair
  optional float margin = 1 [default = 1.0];
  // The first implementation of this cost did not exactly match the cost of
  // Hadsell et al. 2006 -- using (margin - d^2) instead of (margin - d)^2.
  // legacy_version = false (the default) uses (margin - d)^2 as proposed in the
  // Hadsell paper. New models should probably use this version.
  // legacy_version = true uses (margin - d^2). This is kept to support /
  // reproduce existing models and results.
  optional bool legacy_version = 2 [default = false];
}

message ConvolutionParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms

  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in all spatial dimensions, or once per spatial dimension.
  repeated uint32 pad = 3; // The padding size; defaults to 0
  repeated uint32 kernel_size = 4; // The kernel size
  repeated uint32 stride = 6; // The stride; defaults to 1
  // Factor used to dilate the kernel, (implicitly) zero-filling the resulting
  // holes. (Kernel dilation is sometimes referred to by its use in the
  // algorithme à trous from Holschneider et al. 1987.)
  repeated uint32 dilation = 18; // The dilation; defaults to 1

  // For 2D convolution only, the *_h and *_w versions may also be used to
  // specify both spatial dimensions.
  optional uint32 pad_h = 9 [default = 0]; // The padding height (2D only)
  optional uint32 pad_w = 10 [default = 0]; // The padding width (2D only)
  optional uint32 kernel_h = 11; // The kernel height (2D only)
  optional uint32 kernel_w = 12; // The kernel width (2D only)
  optional uint32 stride_h = 13; // The stride height (2D only)
  optional uint32 stride_w = 14; // The stride width (2D only)

  optional uint32 group = 5 [default = 1]; // The group size for group conv

  optional FillerParameter weight_filler = 7; // The filler for the weight
  optional FillerParameter bias_filler = 8; // The filler for the bias
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 15 [default = DEFAULT];

  // The axis to interpret as "channels" when performing convolution.
  // Preceding dimensions are treated as independent inputs;
  // succeeding dimensions are treated as "spatial".
  // With (N, C, H, W) inputs, and axis == 1 (the default), we perform
  // N independent 2D convolutions, sliding C-channel (or (C/g)-channel, for
  // groups g > 1) filters across the spatial axes (H, W) of the input.
  // With (N, C, D, H, W) inputs, and axis == 1, we perform
  // N independent 3D convolutions, sliding (C/g)-channel
  // filters across the spatial axes (D, H, W) of the input.
  optional int32 axis = 16 [default = 1];

  // Whether to force use of the general ND convolution, even if a specific
  // implementation for blobs of the appropriate number of spatial dimensions
  // is available. (Currently, there is only a 2D-specific convolution
  // implementation; for input blobs with num_axes != 2, this option is
  // ignored and the ND implementation will be used.)
  optional bool force_nd_im2col = 17 [default = false];
}
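
// Example (prototxt, illustrative values): a 3x3 atrous (dilated) convolution
// of the kind used in DeepLab; with stride 1, setting pad equal to dilation
// preserves the spatial size:
//
//   convolution_param { num_output: 1024 kernel_size: 3 dilation: 12 pad: 12 }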

message DataParameter {
  enum DB {
    LEVELDB = 0;
    LMDB = 1;
  }
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // The rand_skip variable is for the data layer to skip a few data points
  // to prevent all asynchronous sgd clients from starting at the same point.
  // The skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  // DEPRECATED. Each solver accesses a different subset of the database.
  optional uint32 rand_skip = 7 [default = 0];
  optional DB backend = 8 [default = LEVELDB];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  // Force the encoded image to have 3 color channels
  optional bool force_encoded_color = 9 [default = false];
  // Prefetch queue (Number of batches to prefetch to host memory, increase if
  // data access bandwidth varies).
  optional uint32 prefetch = 10 [default = 4];
}

message DenseCRFParameter {
  // max number of iterations of message passing
  optional int32 max_iter = 1 [default = 10];
  // positional std and weight for the "positional" filter (color-independent)
  repeated float pos_xy_std = 2;
  repeated float pos_w = 3;
  // positional std, color std and weight for the bilateral filter
  repeated float bi_xy_std = 4;
  repeated float bi_rgb_std = 5;
  repeated float bi_w = 6;
  // output is probability or score (score = log(prob))
  optional bool output_probability = 7 [default = true];
}

message DomainTransformParameter {
  // Max number of iterations for filtering.
  optional int32 num_iter = 1 [default = 3];
  // Standard deviation for the spatial domain.
  optional float spatial_sigma = 2 [default = 50];
  // Standard deviation for the range domain.
  optional float range_sigma = 3 [default = 5];
  // minimum weight value (to avoid a zero gradient for ref_grad_data)
  optional float min_weight = 4 [default = 0];
}

message DropoutParameter {
  optional float dropout_ratio = 1 [default = 0.5]; // dropout ratio
}

// DummyDataLayer fills any number of arbitrarily shaped blobs with random
// (or constant) data generated by "Fillers" (see "message FillerParameter").
message DummyDataParameter {
  // This layer produces N >= 1 top blobs. DummyDataParameter must specify 1 or N
  // shape fields, and 0, 1 or N data_fillers.
  //
  // If 0 data_fillers are specified, ConstantFiller with a value of 0 is used.
  // If 1 data_filler is specified, it is applied to all top blobs. If N are
  // specified, the ith is applied to the ith top blob.
  repeated FillerParameter data_filler = 1;
  repeated BlobShape shape = 6;

  // 4D dimensions -- deprecated. Use "shape" instead.
  repeated uint32 num = 2;
  repeated uint32 channels = 3;
  repeated uint32 height = 4;
  repeated uint32 width = 5;
}

message EltwiseParameter {
  enum EltwiseOp {
    PROD = 0;
    SUM = 1;
    MAX = 2;
  }
  optional EltwiseOp operation = 1 [default = SUM]; // element-wise operation
  repeated float coeff = 2; // blob-wise coefficient for SUM operation

  // Whether to use an asymptotically slower (for >2 inputs) but more stable
  // method of computing the gradient for the PROD operation. (No effect for
  // SUM op.)
  optional bool stable_prod_grad = 3 [default = true];
}

// Message that stores parameters used by ELULayer
message ELUParameter {
  // Described in:
  // Clevert, D.-A., Unterthiner, T., & Hochreiter, S. (2015). Fast and Accurate
  // Deep Network Learning by Exponential Linear Units (ELUs). arXiv
  optional float alpha = 1 [default = 1];
}

// Message that stores parameters used by EmbedLayer
message EmbedParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  // The input is given as integers to be interpreted as one-hot
  // vector indices with dimension input_dim. Hence input_dim should be
  // 1 greater than the maximum possible input value.
  optional uint32 input_dim = 2;

  optional bool bias_term = 3 [default = true]; // Whether to use a bias term
  optional FillerParameter weight_filler = 4; // The filler for the weight
  optional FillerParameter bias_filler = 5; // The filler for the bias
}

// Message that stores parameters used by ExpLayer
message ExpParameter {
  // ExpLayer computes outputs y = base ^ (shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = exp(shift + scale * x).
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by FlattenLayer
message FlattenParameter {
  // The first axis to flatten: all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 1 [default = 1];
  // The last axis to flatten: all following axes are retained in the output.
  // May be negative to index from the end (e.g., the default -1 for the last
  // axis).
  optional int32 end_axis = 2 [default = -1];
}

// Message that stores parameters used by HDF5DataLayer
message HDF5DataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 2;

  // Specify whether to shuffle the data.
  // If shuffle == true, the ordering of the HDF5 files is shuffled,
  // and the ordering of data within any given HDF5 file is shuffled,
  // but data between different files are not interleaved; all of a file's
  // data are output (in a random order) before moving onto another file.
  optional bool shuffle = 3 [default = false];
}

message HDF5OutputParameter {
  optional string file_name = 1;
}

message HingeLossParameter {
  enum Norm {
    L1 = 1;
    L2 = 2;
  }
  // Specify the norm to use: L1 or L2
  optional Norm norm = 1 [default = L1];
}

message ImageDataParameter {
  // Specify the data source.
  optional string source = 1;
  // Specify the batch size.
  optional uint32 batch_size = 4 [default = 1];
  // The rand_skip variable is for the data layer to skip a few data points
  // to prevent all asynchronous sgd clients from starting at the same point.
  // The skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  optional uint32 rand_skip = 7 [default = 0];
  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  optional bool shuffle = 8 [default = false];
  // It will also resize images if new_height or new_width are not zero.
  optional uint32 new_height = 9 [default = 0];
  optional uint32 new_width = 10 [default = 0];
  // Specify if the images are color or gray
  optional bool is_color = 11 [default = true];
  // This is the value set for pixels or images where we don't know the label
  optional int32 ignore_label = 15 [default = 255];
  enum LabelType {
    NONE = 0;
    IMAGE = 1;
    PIXEL = 2;
  }
  optional LabelType label_type = 16 [default = IMAGE];
  // DEPRECATED. See TransformationParameter. For data pre-processing, we can do
  // simple scaling and subtracting the data mean, if provided. Note that the
  // mean subtraction is always carried out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // DEPRECATED. See TransformationParameter. Specify if we would like to randomly
  // crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // DEPRECATED. See TransformationParameter. Specify if we want to randomly mirror
  // data.
  optional bool mirror = 6 [default = false];
  optional string root_folder = 12 [default = ""];
}

message InfogainLossParameter {
  // Specify the infogain matrix source.
  optional string source = 1;
}

message InnerProductParameter {
  optional uint32 num_output = 1; // The number of outputs for the layer
  optional bool bias_term = 2 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 3; // The filler for the weight
  optional FillerParameter bias_filler = 4; // The filler for the bias

  // The first axis to be lumped into a single inner product computation;
  // all preceding axes are retained in the output.
  // May be negative to index from the end (e.g., -1 for the last axis).
  optional int32 axis = 5 [default = 1];
  // Specify whether to transpose the weight matrix or not.
  // If transpose == true, any operations will be performed on the transpose
  // of the weight matrix. The weight matrix itself is not going to be
  // transposed; rather, the transpose flag of the operations will be toggled
  // accordingly.
  optional bool transpose = 6 [default = false];
}

message InterpParameter {
  optional int32 height = 1 [default = 0]; // Height of output
  optional int32 width = 2 [default = 0]; // Width of output
  optional int32 zoom_factor = 3 [default = 1]; // zoom factor
  optional int32 shrink_factor = 4 [default = 1]; // shrink factor
  optional int32 pad_beg = 5 [default = 0]; // padding at the beginning of the input
  optional int32 pad_end = 6 [default = 0]; // padding at the end of the input
}
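
// Example (prototxt, illustrative): bilinearly upsample a score map by 8x,
// as in DeepLab-style networks that predict at 1/8 resolution:
//
//   interp_param { zoom_factor: 8 }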

// Message that stores parameters used by LogLayer
message LogParameter {
  // LogLayer computes outputs y = log_base(shift + scale * x), for base > 0.
  // Or if base is set to the default (-1), base is set to e,
  // so y = ln(shift + scale * x) = log_e(shift + scale * x)
  optional float base = 1 [default = -1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

// Message that stores parameters used by LRNLayer
message LRNParameter {
  optional uint32 local_size = 1 [default = 5];
  optional float alpha = 2 [default = 1.];
  optional float beta = 3 [default = 0.75];
  enum NormRegion {
    ACROSS_CHANNELS = 0;
    WITHIN_CHANNEL = 1;
  }
  optional NormRegion norm_region = 4 [default = ACROSS_CHANNELS];
  optional float k = 5 [default = 1.];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}

message MatReadParameter {
  required string prefix = 1;
  optional string source = 2 [default = ""];
  optional int32 strip = 3 [default = 0];
  optional int32 batch_size = 4 [default = 1];
}

message MatWriteParameter {
  required string prefix = 1;
  optional string source = 2 [default = ""];
  optional int32 strip = 3 [default = 0];
  optional int32 period = 4 [default = 1];
}

message MemoryDataParameter {
  optional uint32 batch_size = 1;
  optional uint32 channels = 2;
  optional uint32 height = 3;
  optional uint32 width = 4;
}

message MVNParameter {
  // This parameter can be set to false to normalize mean only
  optional bool normalize_variance = 1 [default = true];
  // This parameter can be set to true to perform DNN-like MVN
  optional bool across_channels = 2 [default = false];
  // Epsilon for not dividing by zero while normalizing variance
  optional float eps = 3 [default = 1e-9];
}

message PoolingParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 1 [default = MAX]; // The pooling method
  // Pad, kernel size, and stride are all given as a single value for equal
  // dimensions in height and width or as Y, X pairs.
  optional uint32 pad = 4 [default = 0]; // The padding size (equal in Y, X)
  optional uint32 pad_h = 9 [default = 0]; // The padding height
  optional uint32 pad_w = 10 [default = 0]; // The padding width
  optional uint32 kernel_size = 2; // The kernel size (square)
  optional uint32 kernel_h = 5; // The kernel height
  optional uint32 kernel_w = 6; // The kernel width
  optional uint32 stride = 3 [default = 1]; // The stride (equal in Y, X)
  optional uint32 stride_h = 7; // The stride height
  optional uint32 stride_w = 8; // The stride width
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 11 [default = DEFAULT];
  // If global_pooling is set, pool over the full extent of the bottom by
  // setting kernel_h = bottom->height and kernel_w = bottom->width
  optional bool global_pooling = 12 [default = false];
}
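
// Example (prototxt, illustrative): overlapping 3x3 max pooling with stride 2:
//
//   pooling_param { pool: MAX kernel_size: 3 stride: 2 }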

message PowerParameter {
  // PowerLayer computes outputs y = (shift + scale * x) ^ power.
  optional float power = 1 [default = 1.0];
  optional float scale = 2 [default = 1.0];
  optional float shift = 3 [default = 0.0];
}

message PythonParameter {
  optional string module = 1;
  optional string layer = 2;
  // This value is set to the attribute `param_str` of the `PythonLayer` object
  // in Python before calling the `setup()` method. This could be a number,
  // string, dictionary in Python dict format, JSON, etc. You may parse this
  // string in the `setup` method and use it in `forward` and `backward`.
  optional string param_str = 3 [default = ''];
  // Whether this PythonLayer is shared among worker solvers during data
  // parallelism. If true, each worker solver sequentially runs forward from
  // this layer. This value should be set to true if you are using it as a
  // data layer.
  optional bool share_in_parallel = 4 [default = false];
}

// Message that stores parameters used by ReductionLayer
message ReductionParameter {
  enum ReductionOp {
    SUM = 1;
    ASUM = 2;
    SUMSQ = 3;
    MEAN = 4;
  }
  optional ReductionOp operation = 1 [default = SUM]; // reduction operation

  // The first axis to reduce to a scalar -- may be negative to index from the
  // end (e.g., -1 for the last axis).
  // (Currently, only reduction along ALL "tail" axes is supported; reduction
  // of axis M through N, where N < num_axes - 1, is unsupported.)
  // Suppose we have an n-axis bottom Blob with shape:
  //     (d0, d1, d2, ..., d(m-1), dm, d(m+1), ..., d(n-1)).
  // If axis == m, the output Blob will have shape
  //     (d0, d1, d2, ..., d(m-1)),
  // and the ReductionOp operation is performed (d0 * d1 * d2 * ... * d(m-1))
  // times, each including (dm * d(m+1) * ... * d(n-1)) individual data.
  // If axis == 0 (the default), the output Blob always has the empty shape
  // (count 1), performing reduction across the entire input --
  // often useful for creating new loss functions.
  optional int32 axis = 2 [default = 0];

  optional float coeff = 3 [default = 1.0]; // coefficient for output
}

// Message that stores parameters used by ReLULayer
message ReLUParameter {
  // Allow non-zero slope for negative inputs to speed up optimization
  // Described in:
  // Maas, A. L., Hannun, A. Y., & Ng, A. Y. (2013). Rectifier nonlinearities
  // improve neural network acoustic models. In ICML Workshop on Deep Learning
  // for Audio, Speech, and Language Processing.
  optional float negative_slope = 1 [default = 0];
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 2 [default = DEFAULT];
}

message ReshapeParameter {
  // Specify the output dimensions. If some of the dimensions are set to 0,
  // the corresponding dimension from the bottom layer is used (unchanged).
  // Exactly one dimension may be set to -1, in which case its value is
  // inferred from the count of the bottom blob and the remaining dimensions.
  // For example, suppose we want to reshape a 2D blob "input" with shape 2 x 8:
  //
  //   layer {
  //     type: "Reshape" bottom: "input" top: "output"
  //     reshape_param { ... }
  //   }
  //
  // If "input" is 2D with shape 2 x 8, then the following reshape_param
  // specifications are all equivalent, producing a 3D blob "output" with shape
  // 2 x 2 x 4:
  //
  //   reshape_param { shape { dim:  2  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim:  4 } }
  //   reshape_param { shape { dim:  0  dim: 2  dim: -1 } }
  //   reshape_param { shape { dim: -1  dim: 0  dim:  2 } }
  //
  optional BlobShape shape = 1;

  // axis and num_axes control the portion of the bottom blob's shape that are
  // replaced by (included in) the reshape. By default (axis == 0 and
  // num_axes == -1), the entire bottom blob shape is included in the reshape,
  // and hence the shape field must specify the entire output shape.
  //
  // axis may be non-zero to retain some portion of the beginning of the input
  // shape (and may be negative to index from the end; e.g., -1 to begin the
  // reshape after the last axis, including nothing in the reshape,
  // -2 to include only the last axis, etc.).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are all equivalent,
  // producing a blob "output" with shape 2 x 2 x 4:
  //
  //   reshape_param { shape { dim: 2  dim: 2  dim: 4 } }
  //   reshape_param { shape { dim: 2  dim: 4 } axis:  1 }
  //   reshape_param { shape { dim: 2  dim: 4 } axis: -3 }
  //
  // num_axes specifies the extent of the reshape.
  // If num_axes >= 0 (and axis >= 0), the reshape will be performed only on
  // input axes in the range [axis, axis+num_axes].
  // num_axes may also be -1, the default, to include all remaining axes
  // (starting from axis).
  //
  // For example, suppose "input" is a 2D blob with shape 2 x 8.
  // Then the following ReshapeLayer specifications are equivalent,
  // producing a blob "output" with shape 1 x 2 x 8:
  //
  //   reshape_param { shape { dim: 1  dim: 2  dim: 8 } }
  //   reshape_param { shape { dim: 1  dim: 2 } num_axes: 1 }
  //   reshape_param { shape { dim: 1 } num_axes: 0 }
  //
  // On the other hand, these would produce output blob shape 2 x 1 x 8:
  //
  //   reshape_param { shape { dim: 2  dim: 1  dim: 8 } }
  //   reshape_param { shape { dim: 1 } axis: 1 num_axes: 0 }
  //
  optional int32 axis = 2 [default = 0];
  optional int32 num_axes = 3 [default = -1];
}

message ScaleParameter {
  // The first axis of bottom[0] (the first input Blob) along which to apply
  // bottom[1] (the second input Blob). May be negative to index from the end
  // (e.g., -1 for the last axis).
  //
  // For example, if bottom[0] is 4D with shape 100x3x40x60, the output
  // top[0] will have the same shape, and bottom[1] may have any of the
  // following shapes (for the given value of axis):
  //    (axis == 0 == -4) 100; 100x3; 100x3x40; 100x3x40x60
  //    (axis == 1 == -3)          3;     3x40;     3x40x60
  //    (axis == 2 == -2)                   40;       40x60
  //    (axis == 3 == -1)                                60
  // Furthermore, bottom[1] may have the empty shape (regardless of the value of
  // "axis") -- a scalar multiplier.
  optional int32 axis = 1 [default = 1];
  // (num_axes is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer. Otherwise, num_axes is determined by the
  // number of axes of the second bottom.)
  // The number of axes of the input (bottom[0]) covered by the scale
  // parameter, or -1 to cover all axes of bottom[0] starting from `axis`.
  // Set num_axes := 0, to multiply with a zero-axis Blob: a scalar.
  optional int32 num_axes = 2 [default = 1];
  // (filler is ignored unless just one bottom is given and the scale is
  // a learned parameter of the layer.)
  // The initialization for the learned scale parameter.
  // Default is the unit (1) initialization, resulting in the ScaleLayer
  // initially performing the identity operation.
  optional FillerParameter filler = 3;
  // Whether to also learn a bias (equivalent to a ScaleLayer+BiasLayer, but
  // may be more efficient). Initialized with bias_filler (defaults to 0).
  optional bool bias_term = 4 [default = false];
  optional FillerParameter bias_filler = 5;
}
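
// Example (prototxt, illustrative): the common BatchNorm + Scale pairing,
// where the Scale layer supplies the learned affine transform with a bias:
//
//   scale_param { bias_term: true }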

message SegAccuracyParameter {
  enum AccuracyMetric {
    PixelAccuracy = 0;
    ClassAccuracy = 1;
    PixelIOU = 2;
  }
  optional AccuracyMetric metric = 1 [default = PixelAccuracy];
  // will ignore pixels with this value when computing accuracy
  repeated int32 ignore_label = 2;
  optional bool reset = 3 [default = true];
}

message SigmoidParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

message SliceParameter {
  // The axis along which to slice -- may be negative to index from the end
  // (e.g., -1 for the last axis).
  // By default, SliceLayer slices blobs along the "channels" axis (1).
  optional int32 axis = 3 [default = 1];
  repeated uint32 slice_point = 2;
  // DEPRECATED: alias for "axis" -- does not support negative indexing.
  optional uint32 slice_dim = 1 [default = 1];
}
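
// Example (prototxt, illustrative): slice a 10-channel blob into chunks of
// 4, 4, and 2 channels:
//
//   slice_param { axis: 1 slice_point: 4 slice_point: 8 }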

// Message that stores parameters used by SoftmaxLayer, SoftmaxWithLossLayer
message SoftmaxParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];

  // The axis along which to perform the softmax -- may be negative to index
  // from the end (e.g., -1 for the last axis).
  // Any other axes will be evaluated as independent softmaxes.
  optional int32 axis = 2 [default = 1];
}

message TanHParameter {
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 1 [default = DEFAULT];
}

// Message that stores parameters used by TileLayer
message TileParameter {
  // The index of the axis to tile.
  optional int32 axis = 1 [default = 1];
  // The number of copies (tiles) of the blob to output.
  optional int32 tiles = 2;
}

// Message that stores parameters used by ThresholdLayer
message ThresholdParameter {
  optional float threshold = 1 [default = 0]; // Strictly positive values
}

message UniqueLabelParameter {
  required int32 max_labels = 1;
  repeated int32 ignore_label = 2;
  repeated float force_label = 3;
}

message WindowDataParameter {
  // Specify the data source.
  optional string source = 1;
  // For data pre-processing, we can do simple scaling and subtracting the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 2 [default = 1];
  optional string mean_file = 3;
  // Specify the batch size.
  optional uint32 batch_size = 4;
  // Specify if we would like to randomly crop an image.
  optional uint32 crop_size = 5 [default = 0];
  // Specify if we want to randomly mirror data.
  optional bool mirror = 6 [default = false];
  // Foreground (object) overlap threshold
  optional float fg_threshold = 7 [default = 0.5];
  // Background (non-object) overlap threshold
  optional float bg_threshold = 8 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float fg_fraction = 9 [default = 0.25];
  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 context_pad = 10 [default = 0];
  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string crop_mode = 11 [default = "warp"];
  // cache_images: will load all images in memory for faster access
  optional bool cache_images = 12 [default = false];
  // append root_folder to locate images
  optional string root_folder = 13 [default = ""];
}

message SPPParameter {
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional uint32 pyramid_height = 1;
  optional PoolMethod pool = 2 [default = MAX]; // The pooling method
  enum Engine {
    DEFAULT = 0;
    CAFFE = 1;
    CUDNN = 2;
  }
  optional Engine engine = 6 [default = DEFAULT];
}
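
// Illustrative usage sketch (hypothetical names): a 3-level spatial pyramid
// (1x1, 2x2, and 4x4 grids) that is max-pooled and concatenated into one
// fixed-length vector regardless of the input's spatial size.
//
//   layer {
//     name: "spp"
//     type: "SPP"
//     bottom: "conv5"
//     top: "spp_out"
//     spp_param {
//       pyramid_height: 3
//       pool: MAX
//     }
//   }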

// DEPRECATED: use LayerParameter.
message V1LayerParameter {
  repeated string bottom = 2;
  repeated string top = 3;
  optional string name = 4;
  repeated NetStateRule include = 32;
  repeated NetStateRule exclude = 33;
  enum LayerType {
    NONE = 0;
    ABSVAL = 35;
    ACCURACY = 1;
    ARGMAX = 30;
    BNLL = 2;
    CONCAT = 3;
    CONTRASTIVE_LOSS = 37;
    CONVOLUTION = 4;
    DATA = 5;
    DECONVOLUTION = 39;
    DROPOUT = 6;
    DUMMY_DATA = 32;
    EUCLIDEAN_LOSS = 7;
    ELTWISE = 25;
    EXP = 38;
    FLATTEN = 8;
    HDF5_DATA = 9;
    HDF5_OUTPUT = 10;
    HINGE_LOSS = 28;
    IM2COL = 11;
    IMAGE_DATA = 12;
    INFOGAIN_LOSS = 13;
    INNER_PRODUCT = 14;
    LRN = 15;
    MEMORY_DATA = 29;
    MULTINOMIAL_LOGISTIC_LOSS = 16;
    MVN = 34;
    POOLING = 17;
    POWER = 26;
    RELU = 18;
    SIGMOID = 19;
    SIGMOID_CROSS_ENTROPY_LOSS = 27;
    SILENCE = 36;
    SOFTMAX = 20;
    SOFTMAX_LOSS = 21;
    SPLIT = 22;
    SLICE = 33;
    TANH = 23;
    WINDOW_DATA = 24;
    THRESHOLD = 31;
  }
  optional LayerType type = 5;
  repeated BlobProto blobs = 6;
  repeated string param = 1001;
  repeated DimCheckMode blob_share_mode = 1002;
  enum DimCheckMode {
    STRICT = 0;
    PERMISSIVE = 1;
  }
  repeated float blobs_lr = 7;
  repeated float weight_decay = 8;
  repeated float loss_weight = 35;
  optional AccuracyParameter accuracy_param = 27;
  optional ArgMaxParameter argmax_param = 23;
  optional ConcatParameter concat_param = 9;
  optional ContrastiveLossParameter contrastive_loss_param = 40;
  optional ConvolutionParameter convolution_param = 10;
  optional DataParameter data_param = 11;
  optional DropoutParameter dropout_param = 12;
  optional DummyDataParameter dummy_data_param = 26;
  optional EltwiseParameter eltwise_param = 24;
  optional ExpParameter exp_param = 41;
  optional HDF5DataParameter hdf5_data_param = 13;
  optional HDF5OutputParameter hdf5_output_param = 14;
  optional HingeLossParameter hinge_loss_param = 29;
  optional ImageDataParameter image_data_param = 15;
  optional InfogainLossParameter infogain_loss_param = 16;
  optional InnerProductParameter inner_product_param = 17;
  optional LRNParameter lrn_param = 18;
  optional MemoryDataParameter memory_data_param = 22;
  optional MVNParameter mvn_param = 34;
  optional PoolingParameter pooling_param = 19;
  optional PowerParameter power_param = 21;
  optional ReLUParameter relu_param = 30;
  optional SigmoidParameter sigmoid_param = 38;
  optional SoftmaxParameter softmax_param = 39;
  optional SliceParameter slice_param = 31;
  optional TanHParameter tanh_param = 37;
  optional ThresholdParameter threshold_param = 25;
  optional WindowDataParameter window_data_param = 20;
  optional TransformationParameter transform_param = 36;
  optional LossParameter loss_param = 42;
  optional V0LayerParameter layer = 1;
}

// DEPRECATED: V0LayerParameter is the old way of specifying layer parameters
// in Caffe. We keep this message type around for legacy support.
message V0LayerParameter {
  optional string name = 1; // the layer name
  optional string type = 2; // the string to specify the layer type

  // Parameters to specify layers with inner products.
  optional uint32 num_output = 3; // The number of outputs for the layer
  optional bool biasterm = 4 [default = true]; // whether to have bias terms
  optional FillerParameter weight_filler = 5; // The filler for the weight
  optional FillerParameter bias_filler = 6; // The filler for the bias

  optional uint32 pad = 7 [default = 0]; // The padding size
  optional uint32 kernelsize = 8; // The kernel size
  optional uint32 group = 9 [default = 1]; // The group size for group conv
  optional uint32 stride = 10 [default = 1]; // The stride
  enum PoolMethod {
    MAX = 0;
    AVE = 1;
    STOCHASTIC = 2;
  }
  optional PoolMethod pool = 11 [default = MAX]; // The pooling method
  optional float dropout_ratio = 12 [default = 0.5]; // dropout ratio

  optional uint32 local_size = 13 [default = 5]; // for local response norm
  optional float alpha = 14 [default = 1.]; // for local response norm
  optional float beta = 15 [default = 0.75]; // for local response norm
  optional float k = 22 [default = 1.];

  // For data layers, specify the data source
  optional string source = 16;
  // For data pre-processing, we can do simple scaling and subtraction of the
  // data mean, if provided. Note that the mean subtraction is always carried
  // out before scaling.
  optional float scale = 17 [default = 1];
  optional string meanfile = 18;
  // For data layers, specify the batch size.
  optional uint32 batchsize = 19;
  // For data layers, specify if we would like to randomly crop an image.
  optional uint32 cropsize = 20 [default = 0];
  // For data layers, specify if we want to randomly mirror data.
  optional bool mirror = 21 [default = false];

  // The blobs containing the numeric parameters of the layer
  repeated BlobProto blobs = 50;
  // The ratio that is multiplied on the global learning rate. If you want to
  // set the learning rate for one blob, you need to set it for all blobs.
  repeated float blobs_lr = 51;
  // The weight decay that is multiplied on the global weight decay.
  repeated float weight_decay = 52;

  // The rand_skip variable is for the data layer to skip a few data points
  // so that asynchronous SGD clients do not all start at the same point. The
  // skip point would be set as rand_skip * rand(0,1). Note that rand_skip
  // should not be larger than the number of keys in the database.
  optional uint32 rand_skip = 53 [default = 0];

  // Fields related to detection (det_*)
  // foreground (object) overlap threshold
  optional float det_fg_threshold = 54 [default = 0.5];
  // background (non-object) overlap threshold
  optional float det_bg_threshold = 55 [default = 0.5];
  // Fraction of batch that should be foreground objects
  optional float det_fg_fraction = 56 [default = 0.25];

  // optional bool OBSOLETE_can_clobber = 57 [default = true];

  // Amount of contextual padding to add around a window
  // (used only by the window_data_layer)
  optional uint32 det_context_pad = 58 [default = 0];

  // Mode for cropping out a detection window
  // warp: cropped window is warped to a fixed size and aspect ratio
  // square: the tightest square around the window is cropped
  optional string det_crop_mode = 59 [default = "warp"];

  // For ReshapeLayer, one needs to specify the new dimensions.
  optional int32 new_num = 60 [default = 0];
  optional int32 new_channels = 61 [default = 0];
  optional int32 new_height = 62 [default = 0];
  optional int32 new_width = 63 [default = 0];

  // Whether or not ImageLayer should shuffle the list of files at every epoch.
  // It will also resize images if new_height or new_width are not zero.
  optional bool shuffle_images = 64 [default = false];

  // For ConcatLayer, one needs to specify the dimension for concatenation, and
  // the other dimensions must be the same for all the bottom blobs.
  // By default it will concatenate blobs along the channels dimension.
  optional uint32 concat_dim = 65 [default = 1];

  optional HDF5OutputParameter hdf5_output_param = 1001;
}

message PReLUParameter {
  // Parametric ReLU described in K. He et al, Delving Deep into Rectifiers:
  // Surpassing Human-Level Performance on ImageNet Classification, 2015.

  // Initial value of a_i. Default is a_i=0.25 for all i.
  optional FillerParameter filler = 1;

  // Whether or not slope parameters are shared across channels.
  optional bool channel_shared = 2 [default = false];
}
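
// Illustrative usage sketch (hypothetical names): a PReLU whose slopes are
// initialized to 0.25 and learned independently per channel.
//
//   layer {
//     name: "prelu1"
//     type: "PReLU"
//     bottom: "conv1"
//     top: "conv1"
//     prelu_param {
//       filler { type: "constant" value: 0.25 }
//       channel_shared: false
//     }
//   }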