File: ConcatImpl.hpp

package info (click to toggle)
libxsmm 1.17-4
  • links: PTS, VCS
  • area: main
  • in suites: sid, trixie
  • size: 14,976 kB
  • sloc: ansic: 119,587; cpp: 27,680; fortran: 9,179; sh: 5,765; makefile: 5,040; pascal: 2,312; python: 1,812; f90: 1,773
file content (83 lines) | stat: -rw-r--r-- 2,424 bytes parent folder | download | duplicates (2)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
/******************************************************************************
* Copyright (c) Intel Corporation - All rights reserved.                      *
* This file is part of the LIBXSMM library.                                   *
*                                                                             *
* For information on the license, see the LICENSE file.                       *
* Further information: https://github.com/hfp/libxsmm/                        *
* SPDX-License-Identifier: BSD-3-Clause                                       *
******************************************************************************/
/* Sasikanth Avancha, Dhiraj Kalamkar (Intel Corp.)
******************************************************************************/


#pragma once

#include <omp.h>
#include <assert.h>
#include <sys/time.h>
#include <string.h>
#include "common.hpp"
#include "check.hpp"
#include "Tensor.hpp"

/* Parameter bundle describing a Concat layer instance; filled in by the node
 * that owns the ConcatImpl and read (via pointer) by the implementation.
 * NOTE(review): field semantics below are inferred from names/common usage in
 * this codebase where not visible here — confirm against the owning node. */
typedef struct {
  int nOutput;            /* number of output feature maps/channels */
  vector<int> nInput;     /* per-bottom-tensor input channel counts (one entry per input) */
  int bdims;              /* presumably bottom (input) tensor dimensionality — TODO confirm */
  int tdims;              /* presumably top (output) tensor dimensionality — TODO confirm */
  int iHeight;            /* input spatial height */
  int iWidth;             /* input spatial width */
  int oHeight;            /* output spatial height */
  int oWidth;             /* output spatial width */
  int batch_size;         /* minibatch size */
  int axis;               /* axis along which the inputs are concatenated */
  int algType;            /* algorithm variant selector — TODO confirm meaning */
  int data_type;          /* element data type tag (e.g. float/bf16) — TODO confirm enum */
  int num_threads;        /* number of OpenMP threads available to the impl */
} ConcatImplParams;


/* Abstract base class for Concat-layer implementations.
 * Holds non-owning configuration state (parameters, layouts, engine tags) and
 * dispatches forward/backward passes to the engine-specific pure-virtual
 * overloads implemented by derived classes. */
class ConcatImpl
{
  protected:
    ConcatImplParams *gp;                       // non-owning: parameters live in the owning node
    int engine;                                 // backend selector (e.g. XSMM)
    TensorLayoutType top_layout_type;
    vector<TensorLayoutType> gbot_layout_type;  // layout type per bottom (input) tensor
    void *top_layout;
    vector<void*> gbot_layout;                  // opaque layout handle per bottom tensor
    int top_compute_engine=-1;                  // -1 = not yet assigned
    vector<int> bot_compute_engine;             // compute engine per bottom tensor
    string next_ntype, nname;                   // next node's type and this node's name

  public:
    ConcatImpl(ConcatImplParams* gp_, int engine_): gp(gp_), engine(engine_) {}

    // Polymorphic base with virtual methods: a virtual destructor is required
    // so that deleting a derived implementation through a ConcatImpl* is
    // well-defined (previously missing -> UB on delete-through-base).
    virtual ~ConcatImpl() = default;

    void set_top_compute_engine(int e) { top_compute_engine = e;}
    void set_bot_compute_engine(int e) { bot_compute_engine.push_back(e);}
    void set_next_node_type(string s) { next_ntype = s; }
    void set_node_name(string s) { nname = s; }

    // Engine-specific passes, supplied by derived classes. `tid` is the
    // calling thread id (0 when invoked from the serial dispatchers below).
    virtual void forwardPropagate(vector<TensorBuf *>& inp, TensorBuf *outp, int tid) = 0;
    virtual void backPropagate(TensorBuf* deloutp, vector<TensorBuf*>& delinp, int tid) = 0;

    // Dispatch forward pass to the engine-specific overload. Only XSMM is
    // handled; any other engine value is deliberately a no-op (unchanged
    // behavior from the original).
    virtual void forwardPropagate(vector<TensorBuf*>& inp, TensorBuf* outp)
    {
      switch(engine)
      {
        case XSMM:
          forwardPropagate(inp, outp, 0);
          break;
      }
    }

    // Dispatch backward pass; same single-engine dispatch as above.
    virtual void backPropagate(TensorBuf* deloutp, vector<TensorBuf*>& delinp)
    {
      switch(engine)
      {
        case XSMM:
          backPropagate(deloutp, delinp, 0);
          break;
      }
    }
};