
Commit

Merge pull request BVLC#1 from hammer2505/master
add caffe layers for apollo
xiaoxq committed Oct 15, 2018
2 parents 99bd997 + 1cd66ba commit faf90af
Showing 25 changed files with 6,073 additions and 0 deletions.
97 changes: 97 additions & 0 deletions include/caffe/layers/dfmb_psroi_align_layer.hpp
@@ -0,0 +1,97 @@
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/

#ifndef CAFFE_DFMB_PSROI_ALIGN_LAYER_HPP_
#define CAFFE_DFMB_PSROI_ALIGN_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

template <typename Dtype>
class DFMBPSROIAlignLayer : public Layer<Dtype> {
public:
explicit DFMBPSROIAlignLayer(const LayerParameter& param)
: Layer<Dtype>(param) {}
virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

virtual inline const char* type() const { return "DFMBPSROIAlign"; }

virtual inline int MinBottomBlobs() const { return 2; }
virtual inline int MaxBottomBlobs() const { return 3; }
virtual inline int MinTopBlobs() const { return 1; }
virtual inline int MaxTopBlobs() const { return 1; }

protected:
virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);

Dtype heat_map_a_;
Dtype heat_map_b_;
Dtype pad_ratio_;

int output_dim_;
bool no_trans_;
Dtype trans_std_;
int sample_per_part_;
int group_height_;
int group_width_;
int pooled_height_;
int pooled_width_;
int part_height_;
int part_width_;

int channels_;
int height_;
int width_;

Blob<Dtype> top_count_;

};

} // namespace caffe

#endif // CAFFE_DFMB_PSROI_ALIGN_LAYER_HPP_
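
Note (not part of the commit): the header above fixes the layer's interface at two or three bottom blobs and exactly one top blob, with the layer-specific settings (heat_map_a_, output_dim_, group and pooled sizes, ...) parsed in LayerSetUp from a proto message that is not shown here. A minimal, hedged sketch of wiring such a layer through the generic Caffe Layer interface follows; the blob shapes and the 5-value ROI layout are illustrative assumptions, and param would still need its layer-specific fields filled in before SetUp could succeed.

// Hedged usage sketch (illustrative, not from this commit).
#include <vector>
#include "caffe/blob.hpp"
#include "caffe/layers/dfmb_psroi_align_layer.hpp"

void RunDFMBPSROIAlignSketch() {
  caffe::LayerParameter param;
  param.set_type("DFMBPSROIAlign");
  // The layer-specific parameters are read in LayerSetUp from a proto message
  // not included in this header; they are assumed to be populated here.

  caffe::Blob<float> features(1, 490, 38, 63);  // position-sensitive score maps (assumed shape)
  caffe::Blob<float> rois(1, 5, 1, 1);          // one ROI as (batch_idx, x1, y1, x2, y2) -- assumed layout
  caffe::Blob<float> output;

  std::vector<caffe::Blob<float>*> bottom, top;
  bottom.push_back(&features);
  bottom.push_back(&rois);  // an optional third (deformation) bottom is allowed by MaxBottomBlobs()
  top.push_back(&output);

  caffe::DFMBPSROIAlignLayer<float> layer(param);
  layer.SetUp(bottom, top);    // LayerSetUp + Reshape
  layer.Forward(bottom, top);  // dispatches to Forward_cpu or Forward_gpu
}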
97 changes: 97 additions & 0 deletions include/caffe/layers/permute_layer.hpp
@@ -0,0 +1,97 @@
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/

#ifndef CAFFE_PERMUTE_LAYER_HPP_
#define CAFFE_PERMUTE_LAYER_HPP_

#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/proto/caffe.pb.h"

namespace caffe {

/**
* @brief Permute the input blob by changing the memory order of the data.
*/

// The main function that performs the permutation.
template<typename Dtype>
void Permute(const int count, Dtype *bottom_data, const bool forward,
const int *permute_order, const int *old_steps,
const int *new_steps,
const int num_axes, Dtype *top_data);

template<typename Dtype>
class PermuteLayer : public Layer<Dtype> {
public:
explicit PermuteLayer(const LayerParameter &param)
: Layer<Dtype>(param) {}

virtual void LayerSetUp(const std::vector<Blob<Dtype> *> &bottom,
const std::vector<Blob<Dtype> *> &top);

virtual void Reshape(const std::vector<Blob<Dtype> *> &bottom,
const std::vector<Blob<Dtype> *> &top);

virtual inline const char *type() const { return "Permute"; }

virtual inline int ExactNumBottomBlobs() const { return 1; }

virtual inline int ExactNumTopBlobs() const { return 1; }

protected:
virtual void Forward_cpu(const std::vector<Blob<Dtype> *> &bottom,
const std::vector<Blob<Dtype> *> &top);

virtual void Forward_gpu(const std::vector<Blob<Dtype> *> &bottom,
const std::vector<Blob<Dtype> *> &top);

virtual void Backward_cpu(const std::vector<Blob<Dtype> *> &top,
const std::vector<bool> &propagate_down,
const std::vector<Blob<Dtype> *> &bottom);

virtual void Backward_gpu(const std::vector<Blob<Dtype> *> &top,
const std::vector<bool> &propagate_down,
const std::vector<Blob<Dtype> *> &bottom);

int num_axes_;
bool need_permute_;

// Stored as Blob so the data is conveniently accessible from the .cu file.
Blob<int> permute_order_;
Blob<int> old_steps_;
Blob<int> new_steps_;
};

} // namespace caffe

#endif // CAFFE_PERMUTE_LAYER_HPP_
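
Note (not part of the commit): the Permute declaration above takes precomputed row-major strides for the old and new axis orders. The standalone sketch below shows the index arithmetic that SSD-style Permute implementations typically use, namely decomposing each flat output index with new_steps and reassembling it with old_steps through permute_order; whether the .cpp/.cu bodies in this pull request match it exactly is an assumption, and the toy 2x3 transpose is purely illustrative.

// Hedged sketch (not from the commit) of the usual Permute index remapping.
#include <cstdio>

void PermuteSketch(int count, const float* src, const int* permute_order,
                   const int* old_steps, const int* new_steps, int num_axes,
                   float* dst) {
  for (int i = 0; i < count; ++i) {
    int old_idx = 0;
    int idx = i;
    for (int j = 0; j < num_axes; ++j) {
      const int order = permute_order[j];
      old_idx += (idx / new_steps[j]) * old_steps[order];
      idx %= new_steps[j];
    }
    dst[i] = src[old_idx];  // "forward" direction; backward swaps the roles of src and dst
  }
}

int main() {
  // Transpose a row-major 2x3 tensor into 3x2: permute_order = {1, 0}.
  const float src[6] = {0, 1, 2, 3, 4, 5};
  float dst[6];
  const int permute_order[2] = {1, 0};
  const int old_steps[2] = {3, 1};  // strides of the 2x3 input
  const int new_steps[2] = {2, 1};  // strides of the 3x2 output
  PermuteSketch(6, src, permute_order, old_steps, new_steps, 2, dst);
  for (int i = 0; i < 6; ++i) std::printf("%g ", dst[i]);
  std::printf("\n");
  return 0;
}

Compiled on its own, the program prints 0 3 1 4 2 5, i.e. the row-major transpose of the 2x3 input.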
91 changes: 91 additions & 0 deletions include/caffe/layers/rcnn_proposal_layer.hpp
@@ -0,0 +1,91 @@
/******************************************************************************
* Copyright 2018 The Apollo Authors. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*****************************************************************************/

#ifndef CAFFE_RCNN_PROPOSAL_LAYER_HPP_
#define CAFFE_RCNN_PROPOSAL_LAYER_HPP_

#include <string>
#include <utility>
#include <vector>

#include "caffe/blob.hpp"
#include "caffe/layer.hpp"
#include "caffe/layers/roi_output_ssd_layer.hpp"
#include "caffe/proto/caffe.pb.h"
#include "caffe/common.hpp"
#include "caffe/syncedmem.hpp"

namespace caffe {
template <typename Dtype>
class RCNNProposalLayer : public ROIOutputSSDLayer<Dtype> {
public:
explicit RCNNProposalLayer(const LayerParameter& param) :
ROIOutputSSDLayer<Dtype>(param) {}
virtual ~RCNNProposalLayer() {}

virtual void LayerSetUp(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Reshape(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual inline const char* type() const { return "RCNNProposal"; }
virtual inline int MinBottomBlobs() const { return 4; }
virtual inline int MaxBottomBlobs() const { return 4; }
virtual inline int MinTopBlobs() const { return 0; }
virtual inline int MaxTopBlobs() const { return -1; }
virtual inline int ExactNumTopBlobs() const { return -1; }

virtual void Forward_cpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);
virtual void Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top);

virtual void Backward_cpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);
virtual void Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom);

protected:
int rois_dim_;
#ifndef CPU_ONLY
Blob<Dtype> dt_conf_;
Blob<Dtype> thr_cls_;
Blob<Dtype> dt_bbox_;
boost::shared_ptr<SyncedMemory> overlapped_;
boost::shared_ptr<SyncedMemory> idx_sm_;
cudaStream_t stream_;
#endif
};

} // namespace caffe

#endif // CAFFE_RCNN_PROPOSAL_LAYER_HPP_
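
Note (not part of the commit): this header only declares the interface (exactly four bottom blobs, plus GPU-side scratch blobs and a CUDA stream for the non-CPU_ONLY path); the decoding, thresholding and NMS live in the .cpp/.cu bodies and in the ROIOutputSSDLayer base. For orientation, the sketch below shows the standard R-CNN box decoding that proposal layers of this kind commonly apply to regression deltas; treating it as exactly what RCNNProposalLayer computes is an assumption.

// Hedged sketch (not from the commit): conventional R-CNN box decoding.
#include <algorithm>
#include <cmath>

struct Box { float x1, y1, x2, y2; };

Box DecodeBox(const Box& roi, float dx, float dy, float dw, float dh,
              float img_w, float img_h) {
  const float w  = roi.x2 - roi.x1 + 1.0f;
  const float h  = roi.y2 - roi.y1 + 1.0f;
  const float cx = roi.x1 + 0.5f * w;
  const float cy = roi.y1 + 0.5f * h;

  const float pred_cx = dx * w + cx;       // shift the center
  const float pred_cy = dy * h + cy;
  const float pred_w  = std::exp(dw) * w;  // rescale the size
  const float pred_h  = std::exp(dh) * h;

  Box out;
  out.x1 = pred_cx - 0.5f * pred_w;
  out.y1 = pred_cy - 0.5f * pred_h;
  out.x2 = pred_cx + 0.5f * pred_w;
  out.y2 = pred_cy + 0.5f * pred_h;
  // Clip to the image, as proposal layers usually do before NMS.
  out.x1 = std::min(std::max(out.x1, 0.0f), img_w - 1.0f);
  out.y1 = std::min(std::max(out.y1, 0.0f), img_h - 1.0f);
  out.x2 = std::min(std::max(out.x2, 0.0f), img_w - 1.0f);
  out.y2 = std::min(std::max(out.y2, 0.0f), img_h - 1.0f);
  return out;
}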
