/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved. *
* This file is part of the LIBXSMM library. *
* *
* For information on the license, see the LICENSE file. *
* Further information: https://github.com/hfp/libxsmm/ *
* SPDX-License-Identifier: BSD-3-Clause *
******************************************************************************/
/* Sasikanth Avancha, Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/
#pragma once
#include <string>
#include <stdio.h>
#include "Node.hpp"
#include "Engine.hpp"
#include "Params.hpp"
#include "Tensor.hpp"
#include "proto/gxm.pb.h"
#include "check.hpp"
#include "io.hpp"
using namespace std;
using namespace gxm;
// Low-level parameters handed to the dropout implementation/kernels.
// Plain aggregate: filled in by DropoutNode::configure and friends.
struct DropoutImplParams
{
  int nInput, nOutput;    // input/output feature-map counts
  int iHeight, iWidth;    // input spatial extents
  int oHeight, oWidth;    // output spatial extents
  int batch_size;         // minibatch size
  int algType, data_type; // algorithm variant and tensor data type
  int num_threads;        // worker-thread count
};
// Node-level configuration for a Dropout layer, parsed from the
// prototxt (see parseDropoutParams). Extends NNParams with the dropout
// ratio, data type, compute engine and algorithm selection.
class DropoutParams : public NNParams
{
  public:
    // Initialize all members so a getter called before its setter
    // returns a defined value instead of reading indeterminate memory.
    DropoutParams(void)
      : dropout_ratio_(0.0f), compute_engine_(0), algotype_(0), data_type_(0) {}
    virtual ~DropoutParams(void) {}

    // Fraction of activations dropped (from DropoutParameter.dropout_ratio).
    void set_dropout_ratio(float r) { dropout_ratio_ = r; }
    float get_dropout_ratio() const { return dropout_ratio_; }

    // Tensor data type identifier.
    void set_data_type(int t) { data_type_ = t; }
    int get_data_type() const { return data_type_; }

    // Backend/engine selector.
    void set_compute_engine(int ce) { compute_engine_ = ce; }
    int get_compute_engine() const { return compute_engine_; }

    // Algorithm variant selector.
    void set_algo_type(int at) { algotype_ = at; }
    int get_algo_type() const { return algotype_; }

  protected:
    float dropout_ratio_;
    int compute_engine_, algotype_, data_type_;
};
// Translate the protobuf NodeParameter describing a Dropout node into a
// freshly allocated DropoutParams. Caller takes ownership of the result.
static MLParams* parseDropoutParams(NodeParameter* np)
{
  DropoutParams* params = new DropoutParams();

  // Node name (must be non-empty).
  string node_name = np->name();
  assert(!node_name.empty());
  params->set_node_name(node_name);

  // Node type string (Dropout).
  string node_type = np->type();
  assert(!node_type.empty());
  params->set_node_type(node_type);

  // Exactly one input tensor and one output tensor.
  assert(np->bottom_size() == 1);
  assert(!np->bottom(0).empty());
  params->set_bottom_names(np->bottom(0));

  assert(np->top_size() == 1);
  assert(!np->top(0).empty());
  params->set_top_names(np->top(0));

  // Execution mode: only TRAIN or TEST are valid.
  assert((np->mode() == TRAIN) || (np->mode() == TEST));
  params->set_mode(np->mode());

  // Whether gradients flow back through this node.
  params->set_bprop_flag(np->propagate_down());

  // Dropout-specific hyper-parameters.
  DropoutParameter dparam = np->dropout_param();
  params->set_dropout_ratio(dparam.dropout_ratio());
  params->set_data_type(dparam.data_type());
  params->set_compute_engine(dparam.engine());
  params->set_algo_type(dparam.algotype());

  return params;
}
/* Dropout layer node in the execution graph. Holds the input/output
 * tensors, the mask/seed buffers and the impl parameters; the actual
 * forward/backward compute is defined in the corresponding .cpp file. */
class DropoutNode : public NNNode
{
public:
// p: parsed node configuration; e: engine that owns/schedules this node.
DropoutNode(DropoutParams* p, MLEngine* e);
virtual ~DropoutNode(void) {}
protected:
// Defined out-of-line (in the .cpp): per-iteration compute steps.
void forwardPropagate();
void backPropagate();
// Engine-specific setup; `engine` selects the backend -- see .cpp.
void configure(int engine);
// Reset every dimension entry of a Shape to zero.
void shape_setzero(Shape* s)
{
for(int i=0; i<MAX_DIMS; i++)
s->dims[i] = 0;
}
Tensor *tenTop_; // Output tensor pointer
Tensor *tenBot_; // Input tensor pointer
void *tenMask_; // Dropout mask buffer (type-erased; element type set elsewhere -- TODO confirm in .cpp)
TensorBuf *tenBotDiff_, *tenBotData_; // Data & Gradients with respect to input
TensorBuf *tenTopData_; // Output buffer
unsigned int *seeds; // Per-thread RNG seeds, presumably -- confirm against .cpp
Shape ts_; // Cached top-tensor shape
float threshold_, scale_; // Drop threshold and rescale factor used by the kernels
unsigned int uint_threshold_; // threshold_ mapped to the integer RNG domain -- TODO confirm
MLEngine* eptr_; // Back-pointer to the owning engine (not owned)
DropoutImplParams gparams_; // Parameters passed to the implementation
};