File: GenRotate.cpp

package info (click to toggle)
intel-graphics-compiler 1.0.12504.6-1%2Bdeb12u1
  • links: PTS, VCS
  • area: main
  • in suites: bookworm
  • size: 83,912 kB
  • sloc: cpp: 910,147; lisp: 202,655; ansic: 15,197; python: 4,025; yacc: 2,241; lex: 1,570; pascal: 244; sh: 104; makefile: 25
file content (326 lines) | stat: -rw-r--r-- 9,704 bytes parent folder | download
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
/*========================== begin_copyright_notice ============================

Copyright (C) 2020-2021 Intel Corporation

SPDX-License-Identifier: MIT

============================= end_copyright_notice ===========================*/

#include "Compiler/GenRotate.hpp"
#include "Compiler/CISACodeGen/ShaderCodeGen.hpp"
#include "Compiler/CodeGenPublic.h"
#include "Compiler/IGCPassSupport.h"
#include "common/LLVMWarningsPush.hpp"
#include <llvm/IR/Instructions.h>
#include <llvm/IR/BasicBlock.h>
#include <llvm/IR/Function.h>
#include <llvm/IR/Module.h>
#include <llvm/IR/IRBuilder.h>
#include <llvm/IR/PatternMatch.h>
#include <llvm/Support/Debug.h>
#include "common/LLVMWarningsPop.hpp"

using namespace llvm;
using namespace IGC;

namespace
{
    // Function pass that recognizes shl/lshr/or sequences implementing an
    // integer rotate and rewrites them as llvm.fshl / llvm.fshr intrinsic
    // calls, so the backend can emit native rotate instructions.
    class GenRotate : public FunctionPass
    {
    public:
        static char ID; // Pass identification, replacement for typeid

        GenRotate() : FunctionPass(ID)
        {
            initializeGenRotatePass(*PassRegistry::getPassRegistry());
        }

        bool runOnFunction(Function& F) override;

        void getAnalysisUsage(AnalysisUsage& AU) const override
        {
            AU.addRequired<MetaDataUtilsWrapper>();
            AU.addRequired<CodeGenContextWrapper>();
            AU.setPreservesCFG();
        }

    private:
        CodeGenContext* m_Ctx = nullptr;

        // Indicate supported integer width
        bool m_SupportInt8 = false;
        bool m_SupportInt16 = false;
        bool m_SupportInt32 = false;
        bool m_SupportInt64 = false;

        // Set when at least one rotate was generated.
        bool m_Changed = false;

        // Try to rewrite I (an Or, or a Trunc of an Or) as a rotate
        // intrinsic; on success I is erased and m_Changed is set.
        void matchRotate(Instruction* I);
    };
}  // namespace


#define PASS_FLAG "igc-genrotate"
#define PASS_DESCRIPTION "Generate rotate with llvm funnel shift intrinsic"
#define PASS_CFG_ONLY false
#define PASS_ANALYSIS false
// Register the pass with the IGC/LLVM pass registry, declaring the analyses
// it depends on (metadata utils wrapper and the codegen context wrapper).
IGC_INITIALIZE_PASS_BEGIN(GenRotate, PASS_FLAG, PASS_DESCRIPTION, PASS_CFG_ONLY, PASS_ANALYSIS)
IGC_INITIALIZE_PASS_DEPENDENCY(MetaDataUtilsWrapper)
IGC_INITIALIZE_PASS_DEPENDENCY(CodeGenContextWrapper)
IGC_INITIALIZE_PASS_END(GenRotate, PASS_FLAG, PASS_DESCRIPTION, PASS_CFG_ONLY, PASS_ANALYSIS)

// Static pass identifier; LLVM uses the address of this variable as the
// unique identity of the pass.
char GenRotate::ID = 0;

// Factory entry point: the caller (the pass manager) takes ownership of the
// returned pass instance.
FunctionPass* IGC::createGenRotatePass()
{
    FunctionPass* pass = new GenRotate();
    return pass;
}

// Scan every basic block of F for rotate patterns and rewrite them as
// funnel-shift intrinsics. Returns true iff the IR was modified.
bool GenRotate::runOnFunction(Function& F)
{
    CodeGenContextWrapper* pCtxWrapper = &getAnalysis<CodeGenContextWrapper>();
    m_Ctx = pCtxWrapper->getCodeGenContext();

    if (!m_Ctx->platform.supportRotateInstruction()) {
        return false;
    }

    // 8-bit rotate is never generated; 16/32-bit rotates are available on any
    // platform that supports rotate; 64-bit depends on QW rotate support.
    m_SupportInt8 = false;
    m_SupportInt16 = true;
    m_SupportInt32 = true;
    m_SupportInt64 = m_Ctx->platform.supportQWRotateInstructions();

    m_Changed = false;
    for (BasicBlock& BB : F)
    {
        // Scan bottom-up so the root of a rotate pattern (the final or/trunc)
        // is visited before the instructions feeding it.
        for (auto II = BB.rbegin(), IE = BB.rend(); II != IE; /*empty */)
        {
            Instruction* I = &*II;

            // Advance II before matching, as I may be deleted in matchRotate()
            ++II;

            matchRotate(I);
        }
    }
    return m_Changed;
}

//
//  rol (V, amt) =  (V << amt) | (unsigned(V) >> (N - amt))
//      where V is of type T (integer) with N bits, and amt is of integer type.
//
//  This function finds the following pattern, note that [insts] denotes that "insts" are optional.
//          [amt = and amt, N-1]
//          high = shl V0, amt
//          [amt0 = sub 0, amt  || amt0 = sub N, amt]
//          [amt0 = and amt0, N-1]
//          low = lshr V1, amt0
//          R = or high, low
//
//      case 0: [ likely, V is i32 or i64]
//          V = V0 (V0 == V1)
//
//      case 1:  [ likely V is i16 or i8]
//          V0 = sext V || zext V
//          V1 = zext V
//          Res = trunc R
//
//          Res's type == V's type
//
//  ror can be handled similarly. Note that
//    ror (x, amt) = ((unsigned)x >> amt) | ( x << (N - amt))
//                 = rol (x, N - amt);
//
void GenRotate::matchRotate(Instruction* I)
{
    // Note that if successful, I is erased !
    using namespace llvm::PatternMatch;

    if (I->use_empty() || I->getType()->isVectorTy())
    {
        return;
    }

    // The pattern root is either the Or itself (case 0), or a Trunc whose
    // operand is the Or (case 1: a narrow rotate computed in a wider type).
    Instruction* OrInst = nullptr;
    if (I->getOpcode() == Instruction::Trunc)
    {
        if (BinaryOperator* tmp = dyn_cast<BinaryOperator>(I->getOperand(0)))
        {
            if (tmp->getOpcode() == Instruction::Or)
            {
                OrInst = tmp;
            }
        }
    }
    else if (I->getOpcode() == Instruction::Or)
    {
        OrInst = I;
    }

    if (OrInst == nullptr)
    {
        return;
    }

    // Do rotate only if
    //   1) type is supported 16/32/64; and
    //   2) both operands are instructions.
    uint64_t typeWidth = I->getType()->getScalarSizeInBits();
    bool typeWidthSupported =
        ((m_SupportInt8 && typeWidth == 8) || (m_SupportInt16 && typeWidth == 16) ||
         (m_SupportInt32 && typeWidth == 32) || (m_SupportInt64 && typeWidth == 64));
    Instruction* LHS = dyn_cast<Instruction>(OrInst->getOperand(0));
    Instruction* RHS = dyn_cast<Instruction>(OrInst->getOperand(1));
    if (!LHS || !RHS || !typeWidthSupported)
    {
        return;
    }

    // Make adjustment so that LHS is shl.
    if (LHS->getOpcode() == Instruction::LShr)
    {
        Instruction* t = LHS;
        LHS = RHS;
        RHS = t;
    }
    if (LHS->getOpcode() != Instruction::Shl ||
        RHS->getOpcode() != Instruction::LShr)
    {
        return;
    }

    // first: find V
    Value* V0 = LHS->getOperand(0);
    Value* V1 = RHS->getOperand(0);
    Value* V = nullptr;
    if (I->getOpcode() == Instruction::Or)
    {
        // case 0: both shifts must read the same value directly.
        if (V0 == V1)
        {
            V = V0;
        }
    }
    else
    {
        // case 1: the shl operand may be sext or zext (its high bits are
        // truncated away anyway), but the lshr operand must be zext so that
        // zeros are shifted into the high bits.
        Value* X0 = nullptr, * X1 = nullptr;
        if ((match(V0, m_ZExt(m_Value(X0))) || match(V0, m_SExt(m_Value(X0)))) &&
            match(V1, m_ZExt(m_Value(X1))))
        {
            if (X0 == X1 && X0->getType()->getScalarSizeInBits() == typeWidth)
            {
                V = X0;
            }
        }
    }

    if (!V)
    {
        return;
    }

    // Second: find amt
    uint64_t typeMask = typeWidth - 1;
    Value* LAmt = LHS->getOperand(1);
    Value* RAmt = RHS->getOperand(1);
    ConstantInt* C_LAmt = dyn_cast<ConstantInt>(LAmt);
    ConstantInt* C_RAmt = dyn_cast<ConstantInt>(RAmt);
    Value* X0 = nullptr, * X1 = nullptr;
    Value* Amt = nullptr;
    bool isROL = true;
    if (C_LAmt || C_RAmt)
    {
        // If only one of shift-amounts is constant, it cannot be rotate.
        if (C_LAmt && C_RAmt)
        {
            // For shift amount that is beyond the typewidth, the result is
            // undefined. Here, we just use the LSB.
            uint64_t c0 = C_LAmt->getZExtValue() & typeMask;
            uint64_t c1 = C_RAmt->getZExtValue() & typeMask;
            if ((c0 + c1) == typeWidth)
            {
                Amt = LAmt;
                isROL = true;
            }
        }
    }
    else
    {
        // Variable amount: one side's amount must be (0 - amt) or (N - amt),
        // optionally masked with N-1; the other side uses amt (optionally
        // masked). If the negated amount feeds the lshr, this is a rol;
        // if it feeds the shl, a ror.
        if (match(RAmt, m_And(m_Sub(m_Zero(), m_Value(X1)), m_SpecificInt(typeMask))) ||
            match(RAmt, m_And(m_Sub(m_SpecificInt(typeWidth), m_Value(X1)), m_SpecificInt(typeMask))) ||
            match(RAmt, m_Sub(m_Zero(), m_Value(X1))) ||
            match(RAmt, m_Sub(m_SpecificInt(typeWidth), m_Value(X1))))
        {
            if (LAmt == X1 ||
                (match(LAmt, m_And(m_Value(X0), m_SpecificInt(typeMask))) && (X1 == X0)))
            {
                Amt = X1;
                isROL = true;
            }
        }
        if (!Amt &&
            (match(LAmt, m_And(m_Sub(m_Zero(), m_Value(X1)), m_SpecificInt(typeMask))) ||
             match(LAmt, m_And(m_Sub(m_SpecificInt(typeWidth), m_Value(X1)), m_SpecificInt(typeMask))) ||
             match(LAmt, m_Sub(m_Zero(), m_Value(X1))) ||
             match(LAmt, m_Sub(m_SpecificInt(typeWidth), m_Value(X1)))))
        {
            if (RAmt == X1 ||
                (match(RAmt, m_And(m_Value(X0), m_SpecificInt(typeMask))) && (X1 == X0)))
            {
                Amt = X1;
                isROL = false;
            }
        }

        if (Amt)
        {
            Value* X0 = nullptr, * X1 = nullptr, * X2 = nullptr;
            // 1) simple case: amt = typeWidth - X0;   use amt1 as shift amount.
            bool isReverse = match(Amt, m_Sub(m_SpecificInt(typeWidth), m_Value(X0)));

            // 2)   t = 16 - X0 | t = 0 - X0   ; for example,  t is i16/i8, etc
            //      t1 = t & 15
            //      amt = zext t1, i32
            isReverse = isReverse ||
                (match(Amt, m_ZExt(m_Value(X1))) &&
                 match(X1, m_And(m_Value(X2), m_SpecificInt(typeMask))) &&
                 (match(X2, m_Sub(m_SpecificInt(typeWidth), m_Value(X0))) ||
                  match(X2, m_Sub(m_Zero(), m_Value(X0)))));

            if (isReverse)
            {
                // The matched amount is the negated one: use X0 and flip
                // the rotate direction instead.
                Amt = X0;
                isROL = !isROL;
            }
        }
    }

    if (!Amt)
    {
        return;
    }

    // Replace I with llvm.fshl or llvm.fshr
    IRBuilder<> Builder(I);
    // Bring Amt to V's width. Amt may be narrower, but in the narrow-rotate
    // (trunc) case it can also be *wider* than V, where CreateZExt would
    // produce invalid IR; truncation is safe because the funnel-shift
    // intrinsics take the shift amount modulo the bit width.
    Amt = Builder.CreateZExtOrTrunc(Amt, V->getType());

    Intrinsic::ID rotateID = isROL ? Intrinsic::fshl : Intrinsic::fshr;
    Value* Args[3] = { V, V, Amt };
#if LLVM_VERSION_MAJOR >= 8
    Type* Ty = V->getType();
    CallInst* rotateCall = Builder.CreateIntrinsic(rotateID, Ty, Args, nullptr, "rotate");
#else
    CallInst* rotateCall = Builder.CreateIntrinsic(rotateID, Args, nullptr, "rotate");
#endif
    rotateCall->setDebugLoc(I->getDebugLoc());
    I->replaceAllUsesWith(rotateCall);
    I->eraseFromParent();

    m_Changed = true;
    return;
}