//===- SparseTensorPipelines.cpp - Pipelines for sparse tensor code -------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/SparseTensor/Pipelines/Passes.h"

#include "mlir/Conversion/GPUToNVVM/GPUToNVVMPass.h"
#include "mlir/Conversion/Passes.h"
#include "mlir/Dialect/Arith/Transforms/Passes.h"
#include "mlir/Dialect/Bufferization/Transforms/Bufferize.h"
#include "mlir/Dialect/Bufferization/Transforms/OneShotAnalysis.h"
#include "mlir/Dialect/Bufferization/Transforms/Passes.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/NVVMDialect.h"
#include "mlir/Dialect/Linalg/Passes.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "mlir/Dialect/SparseTensor/IR/SparseTensor.h"
#include "mlir/Dialect/SparseTensor/Transforms/Passes.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/Passes.h"
using namespace mlir;
using namespace mlir::sparse_tensor;
/// Return configuration options for One-Shot Bufferize.
static bufferization::OneShotBufferizationOptions
getBufferizationOptions(bool analysisOnly) {
  using namespace bufferization;
  OneShotBufferizationOptions options;
  options.bufferizeFunctionBoundaries = true;
  // TODO(springerm): To spot memory leaks more easily, returning dense allocs
  // should be disallowed.
  options.allowReturnAllocs = true;
  options.setFunctionBoundaryTypeConversion(LayoutMapOption::IdentityLayoutMap);
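  // Tensors for which no more specific bufferization rule applies are lowered
  // to memrefs with a fully static identity layout.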
  options.unknownTypeConverterFn = [](Value value, Attribute memorySpace,
                                      const BufferizationOptions &options) {
    return getMemRefTypeWithStaticIdentityLayout(
        cast<TensorType>(value.getType()), memorySpace);
  };
  if (analysisOnly) {
    options.testAnalysisOnly = true;
    options.printConflicts = true;
  }
  return options;
}

//===----------------------------------------------------------------------===//
// Pipeline implementation.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::buildSparseCompiler(
    OpPassManager &pm, const SparseCompilerOptions &options) {
  // Rewrite named linalg ops into linalg.generic form, so that the rest of
  // the pipeline only has to deal with the generic representation.
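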
  pm.addNestedPass<func::FuncOp>(createLinalgGeneralizationPass());
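  // The main lowering step: sparsification of the annotated kernels and
  // bufferization of the surrounding IR, combined into a single pass.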
  pm.addPass(createSparsificationAndBufferizationPass(
      getBufferizationOptions(options.testBufferizationAnalysisOnly),
      options.sparsificationOptions(), options.sparseTensorConversionOptions(),
      options.createSparseDeallocs, options.enableRuntimeLibrary,
      options.enableBufferInitialization, options.vectorLength,
      /*enableVLAVectorization=*/options.armSVE,
      /*enableSIMDIndex32=*/options.force32BitVectorIndices));
  if (options.testBufferizationAnalysisOnly)
    return;
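  // Clean up the bufferized code and eliminate any remaining
  // bufferization.to_tensor/to_memref materializations.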
  pm.addNestedPass<func::FuncOp>(createCanonicalizerPass());
  pm.addNestedPass<func::FuncOp>(
      mlir::bufferization::createFinalizingBufferizePass());

  // GPU code generation.
  const bool gpuCodegen = options.gpuTriple.hasValue();
  if (gpuCodegen) {
    pm.addPass(createSparseGPUCodegenPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createStripDebugInfoPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createConvertSCFToCFPass());
    pm.addNestedPass<gpu::GPUModuleOp>(createLowerGpuOpsToNVVMOpsPass());
  }

  // TODO(springerm): Add sparse support to the BufferDeallocation pass and add
  // it to this pipeline.
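  // Progressively lower the bufferized code towards the LLVM dialect: loops
  // first, then control flow, memref, vector, math, and complex ops.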
  pm.addNestedPass<func::FuncOp>(createConvertLinalgToLoopsPass());
  pm.addNestedPass<func::FuncOp>(createConvertVectorToSCFPass());
  pm.addNestedPass<func::FuncOp>(createConvertSCFToCFPass());
  pm.addPass(memref::createExpandStridedMetadataPass());
  pm.addPass(createLowerAffinePass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createFinalizeMemRefToLLVMConversionPass());
  pm.addNestedPass<func::FuncOp>(createConvertComplexToStandardPass());
  pm.addNestedPass<func::FuncOp>(arith::createArithExpandOpsPass());
  pm.addNestedPass<func::FuncOp>(createConvertMathToLLVMPass());
  pm.addPass(createConvertMathToLibmPass());
  pm.addPass(createConvertComplexToLibmPass());

  // Repeat convert-vector-to-llvm.
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertComplexToLLVMPass());
  pm.addPass(createConvertVectorToLLVMPass(options.lowerVectorToLLVMOptions()));
  pm.addPass(createConvertFuncToLLVMPass());

  // Finalize GPU code generation.
  if (gpuCodegen) {
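    // Serializing GPU modules to CUBIN is only available when MLIR was built
    // with CUDA support; otherwise only the host-side conversion runs.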
#if MLIR_GPU_TO_CUBIN_PASS_ENABLE
    pm.addNestedPass<gpu::GPUModuleOp>(createGpuSerializeToCubinPass(
        options.gpuTriple, options.gpuChip, options.gpuFeatures));
#endif
    pm.addPass(createGpuToLLVMConversionPass());
  }
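
  // Finally, remove the unrealized_conversion_cast pairs left behind by the
  // partial conversions above.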
  pm.addPass(createReconcileUnrealizedCastsPass());
}

//===----------------------------------------------------------------------===//
// Pipeline registration.
//===----------------------------------------------------------------------===//

void mlir::sparse_tensor::registerSparseTensorPipelines() {
  PassPipelineRegistration<SparseCompilerOptions>(
      "sparse-compiler",
      "The standard pipeline for taking sparsity-agnostic IR using the"
      " sparse-tensor type, and lowering it to LLVM IR with concrete"
      " representations and algorithms for sparse tensors.",
      buildSparseCompiler);
}
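
// Once registered, the pipeline can be run by name from any tool that exposes
// registered pass pipelines on the command line (e.g. an mlir-opt binary
// linked against this library):
//
//   mlir-opt input.mlir --sparse-compiler
//
// It can also be built programmatically; a minimal sketch, assuming `context`
// and `module` already exist in the caller:
//
//   PassManager pm(&context);
//   buildSparseCompiler(pm, SparseCompilerOptions());
//   if (failed(pm.run(module)))
//     /* ...report the error... */;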