// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs --replace-value-regex "__omp_offloading_[0-9a-z]+_[0-9a-z]+" "reduction_size[.].+[.]" "pl_cond[.].+[.|,]" --prefix-filecheck-ir-name _
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp -x c -emit-llvm %s -o - | FileCheck %s --check-prefix=CHECK1
// RUN: %clang_cc1 -verify -triple powerpc64le-unknown-linux-gnu -fopenmp-simd -x c -emit-llvm %s -o - | FileCheck %s --implicit-check-not="{{__kmpc|__tgt}}"
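// Checks codegen for variably modified types (VLAs) captured by a
// '#pragma omp parallel if(0)' region: the parallel region is serialized via
// __kmpc_serialized_parallel, and the VLA bounds are forwarded to the outlined
// function as i64 arguments (see the CHECK1 assertions below).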
int a;
void foo() {
  int(*b)[a];
  int *(**c)[a];
#pragma omp parallel if (0)
  b[0][0] = c[0][a][0][a];
}
void bar(int n, int *a) {
  // expected-warning@+1 {{incompatible pointer types initializing 'int (*)[n]' with an expression of type 'int **'}}
  int(*p)[n] = &a;
#pragma omp parallel if(0)
  // expected-warning@+1 {{comparison of distinct pointer types ('int (*)[n]' and 'int **')}}
  if (p == &a) {
  }
}
// CHECK1-LABEL: define {{[^@]+}}@foo
// CHECK1-SAME: () #[[ATTR0:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[B:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[C:%.*]] = alloca i32***, align 8
// CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1:[0-9]+]])
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT:    [[TMP4:%.*]] = zext i32 [[TMP3]] to i64
// CHECK1-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT:    call void @.omp_outlined.(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[B]], i64 [[TMP4]], i32**** [[C]]) #[[ATTR2:[0-9]+]]
// CHECK1-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined.
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[B:%.*]], i64 [[VLA1:%.*]], i32**** nonnull align 8 dereferenceable(8) [[C:%.*]]) #[[ATTR1:[0-9]+]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[B_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT:    [[VLA_ADDR2:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[C_ADDR:%.*]] = alloca i32****, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT:    store i32** [[B]], i32*** [[B_ADDR]], align 8
// CHECK1-NEXT:    store i64 [[VLA1]], i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT:    store i32**** [[C]], i32***** [[C_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[B_ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i64, i64* [[VLA_ADDR2]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32****, i32***** [[C_ADDR]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = load i32***, i32**** [[TMP3]], align 8
// CHECK1-NEXT:    [[ARRAYIDX:%.*]] = getelementptr inbounds i32**, i32*** [[TMP4]], i64 0
// CHECK1-NEXT:    [[TMP5:%.*]] = load i32**, i32*** [[ARRAYIDX]], align 8
// CHECK1-NEXT:    [[TMP6:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT:    [[IDXPROM:%.*]] = sext i32 [[TMP6]] to i64
// CHECK1-NEXT:    [[TMP7:%.*]] = mul nsw i64 [[IDXPROM]], [[TMP2]]
// CHECK1-NEXT:    [[ARRAYIDX3:%.*]] = getelementptr inbounds i32*, i32** [[TMP5]], i64 [[TMP7]]
// CHECK1-NEXT:    [[ARRAYIDX4:%.*]] = getelementptr inbounds i32*, i32** [[ARRAYIDX3]], i64 0
// CHECK1-NEXT:    [[TMP8:%.*]] = load i32*, i32** [[ARRAYIDX4]], align 8
// CHECK1-NEXT:    [[TMP9:%.*]] = load i32, i32* @a, align 4
// CHECK1-NEXT:    [[IDXPROM5:%.*]] = sext i32 [[TMP9]] to i64
// CHECK1-NEXT:    [[ARRAYIDX6:%.*]] = getelementptr inbounds i32, i32* [[TMP8]], i64 [[IDXPROM5]]
// CHECK1-NEXT:    [[TMP10:%.*]] = load i32, i32* [[ARRAYIDX6]], align 4
// CHECK1-NEXT:    [[TMP11:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT:    [[TMP12:%.*]] = mul nsw i64 0, [[TMP0]]
// CHECK1-NEXT:    [[ARRAYIDX7:%.*]] = getelementptr inbounds i32, i32* [[TMP11]], i64 [[TMP12]]
// CHECK1-NEXT:    [[ARRAYIDX8:%.*]] = getelementptr inbounds i32, i32* [[ARRAYIDX7]], i64 0
// CHECK1-NEXT:    store i32 [[TMP10]], i32* [[ARRAYIDX8]], align 4
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@bar
// CHECK1-SAME: (i32 signext [[N:%.*]], i32* [[A:%.*]]) #[[ATTR0]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[N_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[P:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTTHREADID_TEMP_:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    [[DOTBOUND_ZERO_ADDR:%.*]] = alloca i32, align 4
// CHECK1-NEXT:    store i32 0, i32* [[DOTBOUND_ZERO_ADDR]], align 4
// CHECK1-NEXT:    [[TMP0:%.*]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @[[GLOB1]])
// CHECK1-NEXT:    store i32 [[N]], i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    store i32* [[A]], i32** [[A_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32, i32* [[N_ADDR]], align 4
// CHECK1-NEXT:    [[TMP2:%.*]] = zext i32 [[TMP1]] to i64
// CHECK1-NEXT:    [[TMP3:%.*]] = bitcast i32** [[A_ADDR]] to i32*
// CHECK1-NEXT:    store i32* [[TMP3]], i32** [[P]], align 8
// CHECK1-NEXT:    call void @__kmpc_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT:    store i32 [[TMP0]], i32* [[DOTTHREADID_TEMP_]], align 4
// CHECK1-NEXT:    call void @.omp_outlined..1(i32* [[DOTTHREADID_TEMP_]], i32* [[DOTBOUND_ZERO_ADDR]], i64 [[TMP2]], i32** [[P]], i32** [[A_ADDR]]) #[[ATTR2]]
// CHECK1-NEXT:    call void @__kmpc_end_serialized_parallel(%struct.ident_t* @[[GLOB1]], i32 [[TMP0]])
// CHECK1-NEXT:    ret void
//
//
// CHECK1-LABEL: define {{[^@]+}}@.omp_outlined..1
// CHECK1-SAME: (i32* noalias [[DOTGLOBAL_TID_:%.*]], i32* noalias [[DOTBOUND_TID_:%.*]], i64 [[VLA:%.*]], i32** nonnull align 8 dereferenceable(8) [[P:%.*]], i32** nonnull align 8 dereferenceable(8) [[A:%.*]]) #[[ATTR1]] {
// CHECK1-NEXT:  entry:
// CHECK1-NEXT:    [[DOTGLOBAL_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[DOTBOUND_TID__ADDR:%.*]] = alloca i32*, align 8
// CHECK1-NEXT:    [[VLA_ADDR:%.*]] = alloca i64, align 8
// CHECK1-NEXT:    [[P_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT:    [[A_ADDR:%.*]] = alloca i32**, align 8
// CHECK1-NEXT:    store i32* [[DOTGLOBAL_TID_]], i32** [[DOTGLOBAL_TID__ADDR]], align 8
// CHECK1-NEXT:    store i32* [[DOTBOUND_TID_]], i32** [[DOTBOUND_TID__ADDR]], align 8
// CHECK1-NEXT:    store i64 [[VLA]], i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT:    store i32** [[P]], i32*** [[P_ADDR]], align 8
// CHECK1-NEXT:    store i32** [[A]], i32*** [[A_ADDR]], align 8
// CHECK1-NEXT:    [[TMP0:%.*]] = load i64, i64* [[VLA_ADDR]], align 8
// CHECK1-NEXT:    [[TMP1:%.*]] = load i32**, i32*** [[P_ADDR]], align 8
// CHECK1-NEXT:    [[TMP2:%.*]] = load i32**, i32*** [[A_ADDR]], align 8
// CHECK1-NEXT:    [[TMP3:%.*]] = load i32*, i32** [[TMP1]], align 8
// CHECK1-NEXT:    [[TMP4:%.*]] = bitcast i32** [[TMP2]] to i32*
// CHECK1-NEXT:    [[CMP:%.*]] = icmp eq i32* [[TMP3]], [[TMP4]]
// CHECK1-NEXT:    br i1 [[CMP]], label [[IF_THEN:%.*]], label [[IF_END:%.*]]
// CHECK1:       if.then:
// CHECK1-NEXT:    br label [[IF_END]]
// CHECK1:       if.end:
// CHECK1-NEXT:    ret void
//
//