| R"( |
| |
| #ifndef ARM_COMPUTE_HELPER_H |
| #define ARM_COMPUTE_HELPER_H |
| |
| |
| |
| |
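| /* STORE_ROW_n: store rows BASENAME##0..BASENAME##(n-1), each an N0-wide |
| * vector, to PTR + row * STRIDE_Y (+ the per-row offset Z##row). Rows |
| * 10..15 use the hex suffixes A..F. Each macro recurses on the previous |
| * one, so STORE_ROW_n emits exactly n vector stores. */ |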
| #define STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
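| /* CONVERT_STORE_ROW_n: as STORE_ROW_n, but each row is first converted to |
| * DATA_TYPE with saturation (CONVERT_SAT) before being stored. */ |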
| #define CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##0), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_1(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##1), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_2(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##2), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_3(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##3), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_4(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##4), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_5(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##5), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_6(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##6), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_7(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##7), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_8(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##8), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_9(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##9), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_10(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##A), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_11(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##B), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_12(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##C), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_13(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##D), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_14(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##E), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define CONVERT_STORE_ROW_16(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| CONVERT_STORE_ROW_15(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE(N0) \ |
| (CONVERT_SAT((BASENAME##F), VEC_DATA_TYPE(DATA_TYPE, N0)), 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
| |
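| /* STORE_BLOCK: store an M0 x N0 block held in BASENAME##0..BASENAME##(M0-1). |
| * The _STR indirection forces M0 to be expanded before token pasting. |
| * |
| * Illustrative expansion (variable names are examples only): |
| *   STORE_BLOCK(2, 4, float, c, dst_addr, dst_stride_y, zout) |
| * becomes |
| *   vstore4(c0, 0, (__global float *)(dst_addr + 0 * dst_stride_y + zout0)); |
| *   vstore4(c1, 0, (__global float *)(dst_addr + 1 * dst_stride_y + zout1)); |
| */ |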
| #define STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
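| /* CONVERT_STORE_BLOCK: as STORE_BLOCK, with a saturating conversion to |
| * DATA_TYPE applied to every row before the store. */ |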
| #define CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_ROW_##M0(N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define CONVERT_STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) CONVERT_STORE_BLOCK_STR(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| |
| |
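| /* STORE_ROW_PARTIAL_n: as STORE_ROW_n, but only the first STORE_N0 (<= N0) |
| * elements of each N0-wide row are written, via VSTORE_PARTIAL. */ |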
| #define STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##0, 0, (__global DATA_TYPE *)(PTR + 0 * STRIDE_Y + Z##0)); |
| |
| #define STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_1(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##1, 0, (__global DATA_TYPE *)(PTR + 1 * STRIDE_Y + Z##1)); |
| |
| #define STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_2(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##2, 0, (__global DATA_TYPE *)(PTR + 2 * STRIDE_Y + Z##2)); |
| |
| #define STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_3(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##3, 0, (__global DATA_TYPE *)(PTR + 3 * STRIDE_Y + Z##3)); |
| |
| #define STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_4(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##4, 0, (__global DATA_TYPE *)(PTR + 4 * STRIDE_Y + Z##4)); |
| |
| #define STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_5(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##5, 0, (__global DATA_TYPE *)(PTR + 5 * STRIDE_Y + Z##5)); |
| |
| #define STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_6(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##6, 0, (__global DATA_TYPE *)(PTR + 6 * STRIDE_Y + Z##6)); |
| |
| #define STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_7(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##7, 0, (__global DATA_TYPE *)(PTR + 7 * STRIDE_Y + Z##7)); |
| |
| #define STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_8(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##8, 0, (__global DATA_TYPE *)(PTR + 8 * STRIDE_Y + Z##8)); |
| |
| #define STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_9(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##9, 0, (__global DATA_TYPE *)(PTR + 9 * STRIDE_Y + Z##9)); |
| |
| #define STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_10(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##A, 0, (__global DATA_TYPE *)(PTR + 10 * STRIDE_Y + Z##A)); |
| |
| #define STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_11(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##B, 0, (__global DATA_TYPE *)(PTR + 11 * STRIDE_Y + Z##B)); |
| |
| #define STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_12(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##C, 0, (__global DATA_TYPE *)(PTR + 12 * STRIDE_Y + Z##C)); |
| |
| #define STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_13(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##D, 0, (__global DATA_TYPE *)(PTR + 13 * STRIDE_Y + Z##D)); |
| |
| #define STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_14(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##E, 0, (__global DATA_TYPE *)(PTR + 14 * STRIDE_Y + Z##E)); |
| |
| #define STORE_ROW_PARTIAL_16(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| STORE_ROW_PARTIAL_15(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) \ |
| VSTORE_PARTIAL(N0, STORE_N0) \ |
| (BASENAME##F, 0, (__global DATA_TYPE *)(PTR + 15 * STRIDE_Y + Z##F)); |
| |
| |
| |
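| /* STORE_BLOCK_PARTIAL: store STORE_M0 rows, writing only the first STORE_N0 |
| * of the N0 elements in each row. The _IN_X / _IN_Y / _IN_X_AND_Y wrappers |
| * below choose between full and partial stores at run time, depending on |
| * whether this work-item owns a leftover block along x (PARTIAL_COND_X) |
| * and/or y (PARTIAL_COND_Y). */ |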
| #define STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_ROW_PARTIAL_##STORE_M0(N0, STORE_N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| #define STORE_BLOCK_PARTIAL(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) STORE_BLOCK_PARTIAL_STR(STORE_M0, STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #define STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X) && !(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if((PARTIAL_COND_Y) && !(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else if(!(PARTIAL_COND_Y) && (PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) \ |
| if(!(PARTIAL_COND_X)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, PARTIAL_STORE_N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| #define STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) \ |
| if(!(PARTIAL_COND_Y)) \ |
| { \ |
| STORE_BLOCK_PARTIAL(M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } \ |
| else \ |
| { \ |
| STORE_BLOCK_PARTIAL(PARTIAL_STORE_M0, N0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z); \ |
| } |
| |
| |
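| /* STORE_BLOCK_BOUNDARY_AWARE picks the cheapest variant at compile time: |
| * PARTIAL_STORE_M0 == 0 (resp. PARTIAL_STORE_N0 == 0) means no leftover |
| * block exists along y (resp. x), so the run-time check in that dimension |
| * is elided. */ |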
| #if defined(PARTIAL_STORE_M0) && defined(PARTIAL_STORE_N0) |
| |
| |
| #if PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z) |
| |
| #elif PARTIAL_STORE_M0 > 0 && PARTIAL_STORE_N0 == 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_COND_Y) |
| |
| #elif PARTIAL_STORE_M0 == 0 && PARTIAL_STORE_N0 > 0 |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_N0, PARTIAL_COND_X) |
| |
| #else |
| |
| #define STORE_BLOCK_BOUNDARY_AWARE(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) \ |
| STORE_BLOCK_PARTIAL_IN_X_AND_Y(M0, N0, DATA_TYPE, BASENAME, PTR, STRIDE_Y, Z, PARTIAL_STORE_M0, PARTIAL_STORE_N0, PARTIAL_COND_Y, PARTIAL_COND_X) |
| |
| #endif |
| |
| #endif |
| |
| |
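| /* COMPUTE_M0_START_ROW: start row of the y-th M0-high block when the partial |
| * block is processed first and all later blocks are shifted back so that |
| * every store stays in bounds. Worked example (illustrative): with M0 = 4 |
| * and PARTIAL_STORE_M0 = 1, the shift is (4 - 1) % 4 = 3, so block y = 0 |
| * starts at row max(0, 0 - 3) = 0 and block y = 1 at row max(0, 4 - 3) = 1. */ |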
| #if defined(PARTIAL_STORE_M0) |
| |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(max(0, (int)(y * M0) - (int)((M0 - PARTIAL_STORE_M0) % M0)))) |
| #else |
| #define COMPUTE_M0_START_ROW(y, M0, PARTIAL_STORE_M0) \ |
| ((uint)(y * M0)) |
| #endif |
| |
| |
| |
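| /* STORE_VECTOR_SELECT: store one row of vec_size elements at ptr; when cond |
| * is true only the first 'leftover' elements are written. Implemented as a |
| * 1-row STORE_BLOCK_PARTIAL_IN_X with STRIDE_Y and Z both 0 (unused). */ |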
| #define STORE_VECTOR_SELECT(basename, data_type, ptr, vec_size, leftover, cond) \ |
| STORE_BLOCK_PARTIAL_IN_X(1, vec_size, data_type, basename, ptr, 0, 0, leftover, cond) |
| |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #pragma OPENCL EXTENSION cl_khr_fp16 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ENABLED) && defined(cl_arm_integer_dot_product_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_OPENCL_DOT8_ACC_ENABLED) && defined(cl_arm_integer_dot_product_accumulate_int8) |
| #pragma OPENCL EXTENSION cl_arm_integer_dot_product_accumulate_int8 : enable |
| #endif |
| |
| #if defined(ARM_COMPUTE_DEBUG_ENABLED) && defined(cl_arm_printf) |
| #pragma OPENCL EXTENSION cl_arm_printf : enable |
| #endif |
| |
| #define GPU_ARCH_MIDGARD 0x100 |
| #define GPU_ARCH_BIFROST 0x200 |
| #define GPU_ARCH_VALHALL 0x300 |
| |
| |
| #define CONCAT(a, b) a##b |
| |
| |
| #define EXPAND(x) x |
| |
| |
| #define CLAMP(x, min_val, max_val) min(max(x, min_val), max_val) |
| |
| |
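| /* REVn / REVERSE(x, s): reverse the element order of a vector x of size s |
| * (s in {1, 2, 3, 4, 8, 16}). */ |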
| #define REV1(x) ((x)) |
| #define REV2(x) ((x).s10) |
| #define REV3(x) ((x).s210) |
| #define REV4(x) ((x).s3210) |
| #define REV8(x) ((x).s76543210) |
| #define REV16(x) ((x).sFEDCBA9876543210) |
| |
| |
| |
| #define REVERSE_STR(x, s) REV##s((x)) |
| #define REVERSE(x, s) REVERSE_STR(x, s) |
| |
| |
| |
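| /* ROTs_n / ROTATE(x, s, n): rotate a vector x of size s right by n element |
| * positions; n == 0 and n == s are the identity. */ |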
| #define ROT1_0(x) ((x)) |
| #define ROT1_1(x) ((x)) |
| |
| #define ROT2_0(x) ((x)) |
| #define ROT2_1(x) ((x).s10) |
| #define ROT2_2(x) ((x)) |
| |
| #define ROT3_0(x) ((x)) |
| #define ROT3_1(x) ((x).s201) |
| #define ROT3_2(x) ((x).s120) |
| #define ROT3_3(x) ((x)) |
| |
| #define ROT4_0(x) ((x)) |
| #define ROT4_1(x) ((x).s3012) |
| #define ROT4_2(x) ((x).s2301) |
| #define ROT4_3(x) ((x).s1230) |
| #define ROT4_4(x) ((x)) |
| |
| #define ROT8_0(x) ((x)) |
| #define ROT8_1(x) ((x).s70123456) |
| #define ROT8_2(x) ((x).s67012345) |
| #define ROT8_3(x) ((x).s56701234) |
| #define ROT8_4(x) ((x).s45670123) |
| #define ROT8_5(x) ((x).s34567012) |
| #define ROT8_6(x) ((x).s23456701) |
| #define ROT8_7(x) ((x).s12345670) |
| #define ROT8_8(x) ((x)) |
| |
| #define ROT16_0(x) ((x)) |
| #define ROT16_1(x) ((x).sF0123456789ABCDE) |
| #define ROT16_2(x) ((x).sEF0123456789ABCD) |
| #define ROT16_3(x) ((x).sDEF0123456789ABC) |
| #define ROT16_4(x) ((x).sCDEF0123456789AB) |
| #define ROT16_5(x) ((x).sBCDEF0123456789A) |
| #define ROT16_6(x) ((x).sABCDEF0123456789) |
| #define ROT16_7(x) ((x).s9ABCDEF012345678) |
| #define ROT16_8(x) ((x).s89ABCDEF01234567) |
| #define ROT16_9(x) ((x).s789ABCDEF0123456) |
| #define ROT16_10(x) ((x).s6789ABCDEF012345) |
| #define ROT16_11(x) ((x).s56789ABCDEF01234) |
| #define ROT16_12(x) ((x).s456789ABCDEF0123) |
| #define ROT16_13(x) ((x).s3456789ABCDEF012) |
| #define ROT16_14(x) ((x).s23456789ABCDEF01) |
| #define ROT16_15(x) ((x).s123456789ABCDEF0) |
| #define ROT16_16(x) ((x)) |
| |
| |
| |
| #define ROTATE_STR(x, s, n) ROT##s##_##n(x) |
| #define ROTATE(x, s, n) ROTATE_STR(x, s, n) |
| |
| |
| |
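| /* V_OFFSs / VEC_OFFS(dt, s): build the constant offset vector |
| * (0, 1, ..., s-1) with element type dt. */ |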
| #define V_OFFS1(dt) (dt##1)(0) |
| #define V_OFFS2(dt) (dt##2)(0, 1) |
| #define V_OFFS3(dt) (dt##3)(0, 1, 2) |
| #define V_OFFS4(dt) (dt##4)(0, 1, 2, 3) |
| #define V_OFFS8(dt) (dt##8)(0, 1, 2, 3, 4, 5, 6, 7) |
| #define V_OFFS16(dt) (dt##16)(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15) |
| |
| |
| |
| #define VEC_OFFS_STR(dt, s) V_OFFS##s(dt) |
| #define VEC_OFFS(dt, s) VEC_OFFS_STR(dt, s) |
| |
| |
| #define VLOAD_STR(size) vload##size |
| #define VLOAD(size) VLOAD_STR(size) |
| |
| |
| #define VLOAD_PARTIAL_STR(size, load_size) vload_partial_##size##_##load_size |
| #define VLOAD_PARTIAL(size, load_size) VLOAD_PARTIAL_STR(size, load_size) |
| |
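| /* vload_partial_SIZE_LOADSIZE dispatch table: VLOAD_PARTIAL(N, n) loads the |
| * first n elements of an N-wide vector. n == 0 and the invalid combinations |
| * (n > N) resolve to NO_LOAD, which expands to an empty block. */ |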
| #define NO_LOAD(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vload_partial_1_0 NO_LOAD |
| #define vload_partial_1_1 vload1 |
| #define vload_partial_1_2 NO_LOAD |
| #define vload_partial_1_3 NO_LOAD |
| #define vload_partial_1_4 NO_LOAD |
| #define vload_partial_1_5 NO_LOAD |
| #define vload_partial_1_6 NO_LOAD |
| #define vload_partial_1_7 NO_LOAD |
| #define vload_partial_1_8 NO_LOAD |
| #define vload_partial_1_9 NO_LOAD |
| #define vload_partial_1_10 NO_LOAD |
| #define vload_partial_1_11 NO_LOAD |
| #define vload_partial_1_12 NO_LOAD |
| #define vload_partial_1_13 NO_LOAD |
| #define vload_partial_1_14 NO_LOAD |
| #define vload_partial_1_15 NO_LOAD |
| #define vload_partial_1_16 NO_LOAD |
| |
| #define vload_partial_2_0 NO_LOAD |
| #define vload_partial_2_1 vload_partial_1 |
| #define vload_partial_2_2 vload_partial_2 |
| #define vload_partial_2_3 NO_LOAD |
| #define vload_partial_2_4 NO_LOAD |
| #define vload_partial_2_5 NO_LOAD |
| #define vload_partial_2_6 NO_LOAD |
| #define vload_partial_2_7 NO_LOAD |
| #define vload_partial_2_8 NO_LOAD |
| #define vload_partial_2_9 NO_LOAD |
| #define vload_partial_2_10 NO_LOAD |
| #define vload_partial_2_11 NO_LOAD |
| #define vload_partial_2_12 NO_LOAD |
| #define vload_partial_2_13 NO_LOAD |
| #define vload_partial_2_14 NO_LOAD |
| #define vload_partial_2_15 NO_LOAD |
| #define vload_partial_2_16 NO_LOAD |
| |
| #define vload_partial_3_0 NO_LOAD |
| #define vload_partial_3_1 vload_partial_1 |
| #define vload_partial_3_2 vload_partial_2 |
| #define vload_partial_3_3 vload_partial_3 |
| #define vload_partial_3_4 NO_LOAD |
| #define vload_partial_3_5 NO_LOAD |
| #define vload_partial_3_6 NO_LOAD |
| #define vload_partial_3_7 NO_LOAD |
| #define vload_partial_3_8 NO_LOAD |
| #define vload_partial_3_9 NO_LOAD |
| #define vload_partial_3_10 NO_LOAD |
| #define vload_partial_3_11 NO_LOAD |
| #define vload_partial_3_12 NO_LOAD |
| #define vload_partial_3_13 NO_LOAD |
| #define vload_partial_3_14 NO_LOAD |
| #define vload_partial_3_15 NO_LOAD |
| #define vload_partial_3_16 NO_LOAD |
| |
| #define vload_partial_4_0 NO_LOAD |
| #define vload_partial_4_1 vload_partial_1 |
| #define vload_partial_4_2 vload_partial_2 |
| #define vload_partial_4_3 vload_partial_3 |
| #define vload_partial_4_4 vload_partial_4 |
| #define vload_partial_4_5 NO_LOAD |
| #define vload_partial_4_6 NO_LOAD |
| #define vload_partial_4_7 NO_LOAD |
| #define vload_partial_4_8 NO_LOAD |
| #define vload_partial_4_9 NO_LOAD |
| #define vload_partial_4_10 NO_LOAD |
| #define vload_partial_4_11 NO_LOAD |
| #define vload_partial_4_12 NO_LOAD |
| #define vload_partial_4_13 NO_LOAD |
| #define vload_partial_4_14 NO_LOAD |
| #define vload_partial_4_15 NO_LOAD |
| #define vload_partial_4_16 NO_LOAD |
| |
| #define vload_partial_8_0 NO_LOAD |
| #define vload_partial_8_1 vload_partial_1 |
| #define vload_partial_8_2 vload_partial_2 |
| #define vload_partial_8_3 vload_partial_3 |
| #define vload_partial_8_4 vload_partial_4 |
| #define vload_partial_8_5 vload_partial_5 |
| #define vload_partial_8_6 vload_partial_6 |
| #define vload_partial_8_7 vload_partial_7 |
| #define vload_partial_8_8 vload_partial_8 |
| #define vload_partial_8_9 NO_LOAD |
| #define vload_partial_8_10 NO_LOAD |
| #define vload_partial_8_11 NO_LOAD |
| #define vload_partial_8_12 NO_LOAD |
| #define vload_partial_8_13 NO_LOAD |
| #define vload_partial_8_14 NO_LOAD |
| #define vload_partial_8_15 NO_LOAD |
| #define vload_partial_8_16 NO_LOAD |
| |
| #define vload_partial_16_0 NO_LOAD |
| #define vload_partial_16_1 vload_partial_1 |
| #define vload_partial_16_2 vload_partial_2 |
| #define vload_partial_16_3 vload_partial_3 |
| #define vload_partial_16_4 vload_partial_4 |
| #define vload_partial_16_5 vload_partial_5 |
| #define vload_partial_16_6 vload_partial_6 |
| #define vload_partial_16_7 vload_partial_7 |
| #define vload_partial_16_8 vload_partial_8 |
| #define vload_partial_16_9 vload_partial_9 |
| #define vload_partial_16_10 vload_partial_10 |
| #define vload_partial_16_11 vload_partial_11 |
| #define vload_partial_16_12 vload_partial_12 |
| #define vload_partial_16_13 vload_partial_13 |
| #define vload_partial_16_14 vload_partial_14 |
| #define vload_partial_16_15 vload_partial_15 |
| #define vload_partial_16_16 vload_partial_16 |
| |
| |
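| /* Partial-load bodies: sizes 1, 2, 3, 4, 8 and 16 map directly onto the |
| * vloadN built-ins; 5..7 and 9..15 are composed of a power-of-two load plus |
| * a smaller tail load at PTR + 4 or PTR + 8. */ |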
| #define vload_partial_1(DATA, OFFSET, PTR) \ |
| DATA.s0 = vload1(OFFSET, PTR); |
| |
| #define vload_partial_2(DATA, OFFSET, PTR) \ |
| DATA.s01 = vload2(OFFSET, PTR); |
| |
| #define vload_partial_3(DATA, OFFSET, PTR) \ |
| DATA.s012 = vload3(OFFSET, PTR); |
| |
| #define vload_partial_4(DATA, OFFSET, PTR) \ |
| DATA.s0123 = vload4(OFFSET, PTR); |
| |
| #define vload_partial_5(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| DATA.s4 = vload1(OFFSET, PTR + 4); |
| |
| #define vload_partial_6(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vload_partial_7(DATA, OFFSET, PTR) \ |
| vload_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vload_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vload_partial_8(DATA, OFFSET, PTR) \ |
| DATA.s01234567 = vload8(OFFSET, PTR); |
| |
| #define vload_partial_9(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| DATA.s8 = vload1(OFFSET, PTR + 8); |
| |
| #define vload_partial_10(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vload_partial_11(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vload_partial_12(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vload_partial_13(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_14(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_15(DATA, OFFSET, PTR) \ |
| vload_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vload_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vload_partial_16(DATA, OFFSET, PTR) \ |
| DATA = vload16(OFFSET, PTR); |
| |
| |
| |
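| /* PIXEL_UNITn: number of RGBA texels needed to hold an n-element vector |
| * (each texel carries 4 elements). */ |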
| #define PIXEL_UNIT4 1 |
| #define PIXEL_UNIT8 2 |
| #define PIXEL_UNIT16 4 |
| |
| |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) PIXEL_UNIT##vec_size |
| #define CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT(vec_size) CONVERT_VECTOR_SIZE_TO_PIXEL_UNIT_STR(vec_size) |
| |
| |
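| /* read_image2d_<type>x<n>: read n consecutive RGBA texels starting at |
| * (x_coord, y_coord) and concatenate them into a vector of 4*n elements; |
| * write_image2d_<type>x<n> is the matching scatter. */ |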
| #define read_image2d_floatx1(img, x_coord, y_coord) (float4)(read_imagef(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_floatx2(img, x_coord, y_coord) (float8)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_floatx4(img, x_coord, y_coord) (float16)(read_imagef(img, (int2)(x_coord, y_coord)), read_imagef(img, (int2)(x_coord + 1, y_coord)), read_imagef(img, (int2)(x_coord + 2, y_coord)), read_imagef(img, (int2)(x_coord + 3, y_coord))); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define read_image2d_halfx1(img, x_coord, y_coord) (half4)(read_imageh(img, (int2)(x_coord, y_coord))); |
| #define read_image2d_halfx2(img, x_coord, y_coord) (half8)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord))); |
| #define read_image2d_halfx4(img, x_coord, y_coord) (half16)(read_imageh(img, (int2)(x_coord, y_coord)), read_imageh(img, (int2)(x_coord + 1, y_coord)), read_imageh(img, (int2)(x_coord + 2, y_coord)), read_imageh(img, (int2)(x_coord + 3, y_coord))); |
| #endif |
| |
| #define write_image2d_floatx1(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_floatx2(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_floatx4(img, x_coord, y_coord, values) (write_imagef(img, (int2)(x_coord, y_coord), values.s0123), write_imagef(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imagef(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imagef(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| |
| #if defined(ARM_COMPUTE_OPENCL_FP16_ENABLED) && defined(cl_khr_fp16) |
| #define write_image2d_halfx1(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values)); |
| #define write_image2d_halfx2(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567)); |
| #define write_image2d_halfx4(img, x_coord, y_coord, values) (write_imageh(img, (int2)(x_coord, y_coord), values.s0123), write_imageh(img, (int2)(x_coord + 1, y_coord), values.s4567), write_imageh(img, (int2)(x_coord + 2, y_coord), values.s89AB), write_imageh(img, (int2)(x_coord + 3, y_coord), values.sCDEF)); |
| #endif |
| |
| |
| #define READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) read_image2d_##data_type##x##n0(img, x_coord, y_coord) |
| #define READ_IMAGE2D(data_type, n0, img, x_coord, y_coord) READ_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord) |
| |
| |
| #define WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) write_image2d_##data_type##x##n0(img, x_coord, y_coord, values) |
| #define WRITE_IMAGE2D(data_type, n0, img, x_coord, y_coord, values) WRITE_IMAGE2D_STR(data_type, n0, img, x_coord, y_coord, values) |
| |
| #define VSTORE_STR(size) vstore##size |
| #define VSTORE(size) VSTORE_STR(size) |
| |
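| /* Scalar aliases: treat single elements as size-1 "vectors" so the generic |
| * VEC_DATA_TYPE/VLOAD/VSTORE machinery also works for size 1 (see the |
| * vload1/vstore1 definitions below). */ |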
| #define float1 float |
| #define half1 half |
| #define char1 char |
| #define uchar1 uchar |
| #define short1 short |
| #define ushort1 ushort |
| #define int1 int |
| #define uint1 uint |
| #define long1 long |
| #define ulong1 ulong |
| #define double1 double |
| |
| #define vload1(OFFSET, PTR) *(OFFSET + PTR) |
| #define vstore1(DATA, OFFSET, PTR) *(OFFSET + PTR) = DATA |
| |
| |
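| /* VSTORE_PARTIAL(N, n): store the first n elements of an N-wide vector. The |
| * dispatch table below mirrors the vload_partial one; NO_STORE is a no-op. */ |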
| #define VSTORE_PARTIAL_STR(size, store_size) vstore_partial_##size##_##store_size |
| #define VSTORE_PARTIAL(size, store_size) VSTORE_PARTIAL_STR(size, store_size) |
| |
| #define NO_STORE(data, offs, ptr) \ |
| { \ |
| } |
| |
| |
| #define vstore_partial_1_0 NO_STORE |
| #define vstore_partial_1_1 vstore1 |
| #define vstore_partial_1_2 NO_STORE |
| #define vstore_partial_1_3 NO_STORE |
| #define vstore_partial_1_4 NO_STORE |
| #define vstore_partial_1_5 NO_STORE |
| #define vstore_partial_1_6 NO_STORE |
| #define vstore_partial_1_7 NO_STORE |
| #define vstore_partial_1_8 NO_STORE |
| #define vstore_partial_1_9 NO_STORE |
| #define vstore_partial_1_10 NO_STORE |
| #define vstore_partial_1_11 NO_STORE |
| #define vstore_partial_1_12 NO_STORE |
| #define vstore_partial_1_13 NO_STORE |
| #define vstore_partial_1_14 NO_STORE |
| #define vstore_partial_1_15 NO_STORE |
| #define vstore_partial_1_16 NO_STORE |
| |
| #define vstore_partial_2_0 NO_STORE |
| #define vstore_partial_2_1 vstore_partial_1 |
| #define vstore_partial_2_2 vstore_partial_2 |
| #define vstore_partial_2_3 NO_STORE |
| #define vstore_partial_2_4 NO_STORE |
| #define vstore_partial_2_5 NO_STORE |
| #define vstore_partial_2_6 NO_STORE |
| #define vstore_partial_2_7 NO_STORE |
| #define vstore_partial_2_8 NO_STORE |
| #define vstore_partial_2_9 NO_STORE |
| #define vstore_partial_2_10 NO_STORE |
| #define vstore_partial_2_11 NO_STORE |
| #define vstore_partial_2_12 NO_STORE |
| #define vstore_partial_2_13 NO_STORE |
| #define vstore_partial_2_14 NO_STORE |
| #define vstore_partial_2_15 NO_STORE |
| #define vstore_partial_2_16 NO_STORE |
| |
| #define vstore_partial_3_0 NO_STORE |
| #define vstore_partial_3_1 vstore_partial_1 |
| #define vstore_partial_3_2 vstore_partial_2 |
| #define vstore_partial_3_3 vstore_partial_3 |
| #define vstore_partial_3_4 NO_STORE |
| #define vstore_partial_3_5 NO_STORE |
| #define vstore_partial_3_6 NO_STORE |
| #define vstore_partial_3_7 NO_STORE |
| #define vstore_partial_3_8 NO_STORE |
| #define vstore_partial_3_9 NO_STORE |
| #define vstore_partial_3_10 NO_STORE |
| #define vstore_partial_3_11 NO_STORE |
| #define vstore_partial_3_12 NO_STORE |
| #define vstore_partial_3_13 NO_STORE |
| #define vstore_partial_3_14 NO_STORE |
| #define vstore_partial_3_15 NO_STORE |
| #define vstore_partial_3_16 NO_STORE |
| |
| #define vstore_partial_4_0 NO_STORE |
| #define vstore_partial_4_1 vstore_partial_1 |
| #define vstore_partial_4_2 vstore_partial_2 |
| #define vstore_partial_4_3 vstore_partial_3 |
| #define vstore_partial_4_4 vstore_partial_4 |
| #define vstore_partial_4_5 NO_STORE |
| #define vstore_partial_4_6 NO_STORE |
| #define vstore_partial_4_7 NO_STORE |
| #define vstore_partial_4_8 NO_STORE |
| #define vstore_partial_4_9 NO_STORE |
| #define vstore_partial_4_10 NO_STORE |
| #define vstore_partial_4_11 NO_STORE |
| #define vstore_partial_4_12 NO_STORE |
| #define vstore_partial_4_13 NO_STORE |
| #define vstore_partial_4_14 NO_STORE |
| #define vstore_partial_4_15 NO_STORE |
| #define vstore_partial_4_16 NO_STORE |
| |
| #define vstore_partial_8_0 NO_STORE |
| #define vstore_partial_8_1 vstore_partial_1 |
| #define vstore_partial_8_2 vstore_partial_2 |
| #define vstore_partial_8_3 vstore_partial_3 |
| #define vstore_partial_8_4 vstore_partial_4 |
| #define vstore_partial_8_5 vstore_partial_5 |
| #define vstore_partial_8_6 vstore_partial_6 |
| #define vstore_partial_8_7 vstore_partial_7 |
| #define vstore_partial_8_8 vstore_partial_8 |
| #define vstore_partial_8_9 NO_STORE |
| #define vstore_partial_8_10 NO_STORE |
| #define vstore_partial_8_11 NO_STORE |
| #define vstore_partial_8_12 NO_STORE |
| #define vstore_partial_8_13 NO_STORE |
| #define vstore_partial_8_14 NO_STORE |
| #define vstore_partial_8_15 NO_STORE |
| #define vstore_partial_8_16 NO_STORE |
| |
| #define vstore_partial_16_0 NO_STORE |
| #define vstore_partial_16_1 vstore_partial_1 |
| #define vstore_partial_16_2 vstore_partial_2 |
| #define vstore_partial_16_3 vstore_partial_3 |
| #define vstore_partial_16_4 vstore_partial_4 |
| #define vstore_partial_16_5 vstore_partial_5 |
| #define vstore_partial_16_6 vstore_partial_6 |
| #define vstore_partial_16_7 vstore_partial_7 |
| #define vstore_partial_16_8 vstore_partial_8 |
| #define vstore_partial_16_9 vstore_partial_9 |
| #define vstore_partial_16_10 vstore_partial_10 |
| #define vstore_partial_16_11 vstore_partial_11 |
| #define vstore_partial_16_12 vstore_partial_12 |
| #define vstore_partial_16_13 vstore_partial_13 |
| #define vstore_partial_16_14 vstore_partial_14 |
| #define vstore_partial_16_15 vstore_partial_15 |
| #define vstore_partial_16_16 vstore_partial_16 |
| |
| |
| #define vstore_partial_1(DATA, OFFSET, PTR) \ |
| vstore1(DATA.s0, OFFSET, PTR); |
| |
| #define vstore_partial_2(DATA, OFFSET, PTR) \ |
| vstore2(DATA.s01, OFFSET, PTR); |
| |
| #define vstore_partial_3(DATA, OFFSET, PTR) \ |
| vstore3(DATA.s012, OFFSET, PTR); |
| |
| #define vstore_partial_4(DATA, OFFSET, PTR) \ |
| vstore4(DATA.s0123, OFFSET, PTR); |
| |
| #define vstore_partial_5(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore1(DATA.s4, OFFSET, PTR + 4); |
| |
| #define vstore_partial_6(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s45, OFFSET, PTR + 4); |
| |
| #define vstore_partial_7(DATA, OFFSET, PTR) \ |
| vstore_partial_4(DATA.s0123, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s456, OFFSET, PTR + 4); |
| |
| #define vstore_partial_8(DATA, OFFSET, PTR) \ |
| vstore8(DATA.s01234567, OFFSET, PTR); |
| |
| #define vstore_partial_9(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore1(DATA.s8, OFFSET, PTR + 8); |
| |
| #define vstore_partial_10(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_2(DATA.s89, OFFSET, PTR + 8); |
| |
| #define vstore_partial_11(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_3(DATA.s89A, OFFSET, PTR + 8); |
| |
| #define vstore_partial_12(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_4(DATA.s89AB, OFFSET, PTR + 8); |
| |
| #define vstore_partial_13(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_5(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vstore_partial_14(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_6(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vstore_partial_15(DATA, OFFSET, PTR) \ |
| vstore_partial_8(DATA.s01234567, OFFSET, PTR); \ |
| vstore_partial_7(DATA.s89ABCDEF, OFFSET, PTR + 8); |
| |
| #define vstore_partial_16(DATA, OFFSET, PTR) \ |
| vstore16(DATA, OFFSET, PTR); |
| |
| |
| |
| |
| |
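| /* OpenCL has no _sat variants for floating-point destinations, so alias |
| * them to the plain conversions to keep CONVERT_SAT usable for any |
| * DATA_TYPE. */ |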
| #define convert_float_sat convert_float |
| #define convert_float1_sat convert_float |
| #define convert_float2_sat convert_float2 |
| #define convert_float3_sat convert_float3 |
| #define convert_float4_sat convert_float4 |
| #define convert_float8_sat convert_float8 |
| #define convert_float16_sat convert_float16 |
| #define convert_half_sat convert_half |
| #define convert_half1_sat convert_half |
| #define convert_half2_sat convert_half2 |
| #define convert_half3_sat convert_half3 |
| #define convert_half4_sat convert_half4 |
| #define convert_half8_sat convert_half8 |
| #define convert_half16_sat convert_half16 |
| |
| #define convert_float1 convert_float |
| #define convert_half1 convert_half |
| #define convert_char1 convert_char |
| #define convert_uchar1 convert_uchar |
| #define convert_short1 convert_short |
| #define convert_ushort1 convert_ushort |
| #define convert_int1 convert_int |
| #define convert_uint1 convert_uint |
| #define convert_long1 convert_long |
| #define convert_ulong1 convert_ulong |
| #define convert_double1 convert_double |
| |
| #define convert_char1_sat convert_char_sat |
| #define convert_uchar1_sat convert_uchar_sat |
| #define convert_uchar2_sat convert_uchar2_sat |
| #define convert_uchar3_sat convert_uchar3_sat |
| #define convert_uchar4_sat convert_uchar4_sat |
| #define convert_uchar8_sat convert_uchar8_sat |
| #define convert_uchar16_sat convert_uchar16_sat |
| #define convert_short1_sat convert_short_sat |
| #define convert_ushort1_sat convert_ushort_sat |
| #define convert_int1_sat convert_int_sat |
| #define convert_uint1_sat convert_uint_sat |
| #define convert_long1_sat convert_long_sat |
| #define convert_ulong1_sat convert_ulong_sat |
| #define convert_double1_sat convert_double_sat |
| |
| #define VEC_DATA_TYPE_STR(type, size) type##size |
| #define VEC_DATA_TYPE(type, size) VEC_DATA_TYPE_STR(type, size) |
| |
| #define CONVERT_STR(x, type) (convert_##type((x))) |
| #define CONVERT(x, type) CONVERT_STR(x, type) |
| |
| #define CONVERT_SAT_STR(x, type) (convert_##type##_sat((x))) |
| #define CONVERT_SAT(x, type) CONVERT_SAT_STR(x, type) |
| |
| #define CONVERT_SAT_ROUND_STR(x, type, round) (convert_##type##_sat_##round((x))) |
| #define CONVERT_SAT_ROUND(x, type, round) CONVERT_SAT_ROUND_STR(x, type, round) |
| |
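| /* SELECT_VEC_DATA_TYPE: integer vector type with the same element size as |
| * 'type', as required for relational results and the mask argument of |
| * select(). */ |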
| #define select_vec_dt_uchar(size) uchar##size |
| #define select_vec_dt_char(size) char##size |
| #define select_vec_dt_ushort(size) ushort##size |
| #define select_vec_dt_short(size) short##size |
| #define select_vec_dt_half(size) short##size |
| #define select_vec_dt_uint(size) uint##size |
| #define select_vec_dt_int(size) int##size |
| #define select_vec_dt_float(size) int##size |
| #define select_vec_dt_ulong(size) ulong##size |
| #define select_vec_dt_long(size) long##size |
| |
| #define SELECT_VEC_DATA_TYPE_STR(type, size) select_vec_dt_##type(size) |
| #define SELECT_VEC_DATA_TYPE(type, size) SELECT_VEC_DATA_TYPE_STR(type, size) |
| #define SELECT_DATA_TYPE(type) SELECT_VEC_DATA_TYPE_STR(type, 1) |
| |
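| /* SIGNED_INT_VEC_DATA_TYPE: signed integer vector type with the same |
| * element size as 'type'. */ |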
| #define signed_int_vec_dt_uchar(size) char##size |
| #define signed_int_vec_dt_char(size) char##size |
| #define signed_int_vec_dt_ushort(size) short##size |
| #define signed_int_vec_dt_short(size) short##size |
| #define signed_int_vec_dt_half(size) short##size |
| #define signed_int_vec_dt_uint(size) int##size |
| #define signed_int_vec_dt_int(size) int##size |
| #define signed_int_vec_dt_float(size) int##size |
| #define signed_int_vec_dt_ulong(size) long##size |
| #define signed_int_vec_dt_long(size) long##size |
| |
| #define SIGNED_INT_VEC_DATA_TYPE_STR(type, size) signed_int_vec_dt_##type(size) |
| #define SIGNED_INT_VEC_DATA_TYPE(type, size) SIGNED_INT_VEC_DATA_TYPE_STR(type, size) |
| #define SIGNED_INT_DATA_TYPE(type) SIGNED_INT_VEC_DATA_TYPE_STR(type, 1) |
| |
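| /* Tree reductions over all elements of a vector: SUM_REDUCE, PROD_REDUCE |
| * and MAX_REDUCE halve the vector recursively (size in {1, 2, 3, 4, 8, 16}). */ |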
| #define sum_reduce_1(x) (x) |
| #define sum_reduce_2(x) ((x).s0) + ((x).s1) |
| #define sum_reduce_3(x) sum_reduce_2((x).s01) + ((x).s2) |
| #define sum_reduce_4(x) sum_reduce_2((x).s01) + sum_reduce_2((x).s23) |
| #define sum_reduce_8(x) sum_reduce_4((x).s0123) + sum_reduce_4((x).s4567) |
| #define sum_reduce_16(x) sum_reduce_8((x).s01234567) + sum_reduce_8((x).s89ABCDEF) |
| |
| #define SUM_REDUCE_STR(x, size) sum_reduce_##size(x) |
| #define SUM_REDUCE(x, size) SUM_REDUCE_STR(x, size) |
| |
| #define prod_reduce_1(x) (x) |
| #define prod_reduce_2(x) ((x).s0) * ((x).s1) |
| #define prod_reduce_3(x) prod_reduce_2((x).s01) * ((x).s2) |
| #define prod_reduce_4(x) prod_reduce_2((x).s01) * prod_reduce_2((x).s23) |
| #define prod_reduce_8(x) prod_reduce_4((x).s0123) * prod_reduce_4((x).s4567) |
| #define prod_reduce_16(x) prod_reduce_8((x).s01234567) * prod_reduce_8((x).s89ABCDEF) |
| |
| #define PROD_REDUCE_STR(x, size) prod_reduce_##size(x) |
| #define PROD_REDUCE(x, size) PROD_REDUCE_STR(x, size) |
| |
| #define max_reduce_1(x) (x) |
| #define max_reduce_2(x) max(((x).s0), ((x).s1)) |
| #define max_reduce_3(x) max(max_reduce_2((x).s01), ((x).s2)) |
| #define max_reduce_4(x) max(max_reduce_2((x).s01), max_reduce_2((x).s23)) |
| #define max_reduce_8(x) max(max_reduce_4((x).s0123), max_reduce_4((x).s4567)) |
| #define max_reduce_16(x) max(max_reduce_8((x).s01234567), max_reduce_8((x).s89ABCDEF)) |
| |
| #define MAX_REDUCE_STR(x, size) max_reduce_##size(x) |
| #define MAX_REDUCE(x, size) MAX_REDUCE_STR(x, size) |
| |
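| /* Kernel-argument declaration macros: each expands to the buffer pointer, |
| * the per-dimension strides and steps (both in bytes) and the byte offset |
| * of the first element, for a tensor of the given rank named 'name'. */ |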
| #define VECTOR_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define IMAGE_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR3D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR4D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_offset_first_element_in_bytes |
| |
| #define TENSOR5D_DECLARATION(name) \ |
| __global uchar *name##_ptr, \ |
| uint name##_stride_x, \ |
| uint name##_step_x, \ |
| uint name##_stride_y, \ |
| uint name##_step_y, \ |
| uint name##_stride_z, \ |
| uint name##_step_z, \ |
| uint name##_stride_w, \ |
| uint name##_step_w, \ |
| uint name##_stride_v, \ |
| uint name##_step_v, \ |
| uint name##_offset_first_element_in_bytes |
| |
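| /* CONVERT_TO_*_STRUCT: build a Vector/Image/Tensor3D/Tensor4D view from the |
| * arguments declared by the matching *_DECLARATION macro, advancing ptr to |
| * this work-item's first element; the _NO_STEP variants pass step 0 so ptr |
| * stays at the start of the tensor. The 4D variant splits get_global_id(2) |
| * into z and w using mod_size. */ |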
| #define CONVERT_TO_VECTOR_STRUCT(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x) |
| |
| #define CONVERT_TO_VECTOR_STRUCT_NO_STEP(name) \ |
| update_vector_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0) |
| |
| #define CONVERT_TO_IMAGE_STRUCT(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y) |
| |
| #define CONVERT_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TENSOR3D_TO_IMAGE_STRUCT_NO_STEP(name) \ |
| update_image_from_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(name) \ |
| update_tensor3D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z, name##_stride_w, name##_step_w, mod_size) |
| |
| #define CONVERT_TO_TENSOR4D_STRUCT_NO_STEP(name, mod_size) \ |
| update_tensor4D_workitem_ptr(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, 0, name##_stride_y, 0, name##_stride_z, 0, name##_stride_w, 0, mod_size) |
| |
| #define CONVERT_TO_TENSOR3D_STRUCT_NO_UPDATE_PTR(name) \ |
| tensor3D_ptr_no_update(name##_ptr, name##_offset_first_element_in_bytes, name##_stride_x, name##_step_x, name##_stride_y, name##_step_y, \ |
| name##_stride_z, name##_step_z) |
| |
| |
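| /* Lightweight views over linear byte buffers; all strides are in bytes. |
| * Typical usage (illustrative sketch, not a kernel from this file): |
| * |
| *   __kernel void copy_kernel(IMAGE_DECLARATION(src), IMAGE_DECLARATION(dst)) |
| *   { |
| *       Image src = CONVERT_TO_IMAGE_STRUCT(src); |
| *       Image dst = CONVERT_TO_IMAGE_STRUCT(dst); |
| *       *dst.ptr = *src.ptr; |
| *   } |
| */ |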
| typedef struct Vector |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| } Vector; |
| |
| |
| typedef struct Image |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| } Image; |
| |
| |
| typedef struct Tensor3D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| } Tensor3D; |
| |
| |
| typedef struct Tensor4D |
| { |
| __global uchar *ptr; |
| int offset_first_element_in_bytes; |
| int stride_x; |
| int stride_y; |
| int stride_z; |
| int stride_w; |
| } Tensor4D; |
| |
| |
| inline Vector update_vector_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x) |
| { |
| Vector vector = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| }; |
| vector.ptr += vector.offset_first_element_in_bytes + get_global_id(0) * step_x; |
| return vector; |
| } |
| |
| |
| inline Image update_image_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y; |
| return img; |
| } |
| |
| |
| inline Image update_image_from_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Image img = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y |
| }; |
| img.ptr += img.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return img; |
| } |
| |
| |
| inline Tensor3D update_tensor3D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + get_global_id(2) * step_z; |
| return tensor; |
| } |
| |
| |
| inline Tensor3D tensor3D_ptr_no_update(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z) |
| { |
| Tensor3D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z |
| }; |
| return tensor; |
| } |
| |
| inline Tensor4D update_tensor4D_workitem_ptr(__global uchar *ptr, uint offset_first_element_in_bytes, uint stride_x, uint step_x, uint stride_y, uint step_y, uint stride_z, uint step_z, uint stride_w, |
| uint step_w, |
| uint mod_size) |
| { |
| Tensor4D tensor = |
| { |
| .ptr = ptr, |
| .offset_first_element_in_bytes = offset_first_element_in_bytes, |
| .stride_x = stride_x, |
| .stride_y = stride_y, |
| .stride_z = stride_z, |
| .stride_w = stride_w |
| }; |
| |
| tensor.ptr += tensor.offset_first_element_in_bytes + get_global_id(0) * step_x + get_global_id(1) * step_y + (get_global_id(2) % mod_size) * step_z + (get_global_id(2) / mod_size) * step_w; |
| return tensor; |
| } |
| |
| |
| inline __global const uchar *vector_offset(const Vector *vec, int x) |
| { |
| return vec->ptr + x * vec->stride_x; |
| } |
| |
| |
| inline __global uchar *offset(const Image *img, int x, int y) |
| { |
| return img->ptr + x * img->stride_x + y * img->stride_y; |
| } |
| |
| |
| inline __global const uchar *tensor3D_offset(const Tensor3D *tensor, int x, int y, int z) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z; |
| } |
| |
| |
| inline __global const uchar *tensor4D_offset(const Tensor4D *tensor, int x, int y, int z, int w) |
| { |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + w * tensor->stride_w; |
| } |
| |
| |
| inline __global const uchar *tensor3D_index2ptr(const Tensor3D *tensor, uint width, uint height, uint depth, uint index) |
| { |
| uint num_elements = width * height; |
| |
| const uint z = index / num_elements; |
| |
| index %= num_elements; |
| |
| const uint y = index / width; |
| |
| index %= width; |
| |
| const uint x = index; |
| |
| return tensor->ptr + x * tensor->stride_x + y * tensor->stride_y + z * tensor->stride_z + tensor->offset_first_element_in_bytes; |
| } |
| |
| #endif |
| |
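| /* FFT helpers and kernels (compiled only when DATA_TYPE is defined at build |
| * time). Complex values are stored interleaved as (real, imaginary) pairs. */ |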
| #if defined(DATA_TYPE) |
| |
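| /* Complex multiply of 'input' by the twiddle factor |
| * e^(i*phi) = (cos(phi), sin(phi)). */ |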
| #define TWIDDLE_FACTOR_MULTIPLICATION(phi, input) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| w, tmp; \ |
| w.x = cos(phi); \ |
| w.y = sin(phi); \ |
| tmp.x = (w.x * input.x) - (w.y * input.y); \ |
| tmp.y = (w.x * input.y) + (w.y * input.x); \ |
| input = tmp; \ |
| } |
| |
| |
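| /* Radix-2 butterfly: (c0, c1) <- (c0 + c1, c0 - c1). */ |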
| #define DFT_2(c0, c1) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0; \ |
| v0 = c0; \ |
| c0 = v0 + c1; \ |
| c1 = v0 - c1; \ |
| } |
| |
| |
| #define SQRT3DIV2 0.86602540378443f |
| |
| |
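| /* Radix-3 butterfly on complex values c0..c2; SQRT3DIV2 = sqrt(3)/2 |
| * = sin(pi/3). */ |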
| #define DFT_3(c0, c1, c2) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0 = c1 + c2; \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v1 = c1 - c2; \ |
| c1.x = c0.x - 0.5f * v0.x + v1.y * SQRT3DIV2; \ |
| c1.y = c0.y - 0.5f * v0.y - v1.x * SQRT3DIV2; \ |
| c2.x = c0.x - 0.5f * v0.x - v1.y * SQRT3DIV2; \ |
| c2.y = c0.y - 0.5f * v0.y + v1.x * SQRT3DIV2; \ |
| c0 = c0 + v0; \ |
| } |
| |
| |
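| /* Radix-4 butterfly on complex values c0..c3. */ |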
| #define DFT_4(c0, c1, c2, c3) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0, v1, v2, v3; \ |
| v0 = c0 + c2; \ |
| v1 = c1 + c3; \ |
| v2 = c0 - c2; \ |
| v3.x = c1.y - c3.y; \ |
| v3.y = c3.x - c1.x; \ |
| c0 = v0 + v1; \ |
| c2 = v0 - v1; \ |
| c1 = v2 + v3; \ |
| c3 = v2 - v3; \ |
| } |
| |
| |
| #define W5_A (DATA_TYPE)0.30901699437494f |
| #define W5_B (DATA_TYPE)0.95105651629515f |
| #define W5_C (DATA_TYPE)0.80901699437494f |
| #define W5_D (DATA_TYPE)0.58778525229247f |
| |
| |
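| /* Radix-5 butterfly; W5_A..W5_D are the magnitudes of cos and sin of |
| * 2*pi/5 and 4*pi/5. */ |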
| #define DFT_5(c0, c1, c2, c3, c4) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0, v1, v2, v3, v4; \ |
| v0 = c0; \ |
| v1 = W5_A * (c1 + c4) - W5_C * (c2 + c3); \ |
| v2 = W5_C * (c1 + c4) - W5_A * (c2 + c3); \ |
| v3 = W5_D * (c1 - c4) - W5_B * (c2 - c3); \ |
| v4 = W5_B * (c1 - c4) + W5_D * (c2 - c3); \ |
| c0 = v0 + c1 + c2 + c3 + c4; \ |
| c1 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v4.y, -v4.x); \ |
| c2 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v3.y, -v3.x); \ |
| c3 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v3.y, v3.x); \ |
| c4 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v4.y, v4.x); \ |
| } |
| |
| |
| #define W7_A (DATA_TYPE)0.62348980185873f |
| #define W7_B (DATA_TYPE)0.78183148246802f |
| #define W7_C (DATA_TYPE)0.22252093395631f |
| #define W7_D (DATA_TYPE)0.97492791218182f |
| #define W7_E (DATA_TYPE)0.90096886790241f |
| #define W7_F (DATA_TYPE)0.43388373911755f |
| |
| |
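| /* Radix-7 butterfly; W7_A..W7_F are the magnitudes of cos and sin of |
| * 2*pi/7, 4*pi/7 and 6*pi/7. */ |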
| #define DFT_7(c0, c1, c2, c3, c4, c5, c6) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0, v1, v2, v3, v4, v5, v6; \ |
| v0 = c0; \ |
| v1 = W7_A * (c1 + c6) - W7_C * (c2 + c5) - W7_E * (c3 + c4); \ |
| v2 = W7_C * (c1 + c6) + W7_E * (c2 + c5) - W7_A * (c3 + c4); \ |
| v3 = W7_E * (c1 + c6) - W7_A * (c2 + c5) + W7_C * (c3 + c4); \ |
| v4 = W7_B * (c1 - c6) + W7_D * (c2 - c5) + W7_F * (c3 - c4); \ |
| v5 = W7_D * (c1 - c6) - W7_F * (c2 - c5) - W7_B * (c3 - c4); \ |
| v6 = W7_F * (c1 - c6) - W7_B * (c2 - c5) + W7_D * (c3 - c4); \ |
| c0 = v0 + c1 + c2 + c3 + c4 + c5 + c6; \ |
| c1 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v4.y, -v4.x); \ |
| c2 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v5.y, -v5.x); \ |
| c3 = v0 - v3 + (VEC_DATA_TYPE(DATA_TYPE, 2))(v6.y, -v6.x); \ |
| c4 = v0 - v3 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v6.y, v6.x); \ |
| c5 = v0 - v2 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v5.y, v5.x); \ |
| c6 = v0 + v1 + (VEC_DATA_TYPE(DATA_TYPE, 2))(-v4.y, v4.x); \ |
| } |
| |
| |
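| /* Radix-8 butterfly; M_SQRT1_2_F is the OpenCL built-in constant |
| * 1/sqrt(2). */ |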
| #define DFT_8(c0, c1, c2, c3, c4, c5, c6, c7) \ |
| { \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| v0, v1, v2, v3, v4, v5, v6, v7; \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| s0, s1, s2, s3, s4, s5, s6, s7; \ |
| VEC_DATA_TYPE(DATA_TYPE, 2) \ |
| t0, t1, t2; \ |
| v0 = c0 + c4; \ |
| v1 = c1 + c5; \ |
| v2 = c2 + c6; \ |
| v3 = c3 + c7; \ |
| v4 = c0 - c4; \ |
| v5 = c1 - c5; \ |
| v6 = c2 - c6; \ |
| v7 = c3 - c7; \ |
| s0 = v0 + v2; \ |
| s1 = v1 + v3; \ |
| s2 = v0 - v2; \ |
| s3 = v1 - v3; \ |
| s4.x = v4.x - v6.y; \ |
| s4.y = v4.y + v6.x; \ |
| s5.x = v5.x - v7.y; \ |
| s5.y = v5.y + v7.x; \ |
| s6.x = v4.x + v6.y; \ |
| s6.y = v4.y - v6.x; \ |
| s7.x = v5.x + v7.y; \ |
| s7.y = v5.y - v7.x; \ |
| t0.x = -s3.y; \ |
| t0.y = s3.x; \ |
| t1.x = M_SQRT1_2_F * (s5.x - s5.y); \ |
| t1.y = M_SQRT1_2_F * (s5.x + s5.y); \ |
| t2.x = -M_SQRT1_2_F * (s7.x + s7.y); \ |
| t2.y = M_SQRT1_2_F * (s7.x - s7.y); \ |
| c0 = s0 + s1; \ |
| c1 = s6 - t2; \ |
| c2 = s2 - t0; \ |
| c3 = s4 - t1; \ |
| c4 = s0 - s1; \ |
| c5 = s6 + t2; \ |
| c6 = s2 + t0; \ |
| c7 = s4 + t1; \ |
| } |
| |
| |
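| /* First-stage radix-N kernels: the axis-0 variants load N consecutive |
| * complex values per work-item along x; the axis-1 variants load one |
| * complex value from each of N consecutive rows. With IN_PLACE defined |
| * the output tensor aliases the input. */ |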
| __kernel void fft_radix_2_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 4) |
| data = vload4(0, (__global DATA_TYPE *)input.ptr); |
| |
| |
| DFT_2(data.s01, data.s23); |
| |
| |
| vstore4(data, 0, (__global DATA_TYPE *)output.ptr); |
| } |
| |
| |
| __kernel void fft_radix_2_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| // Get tensor pointers |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| // Load two complex input values, one vload2 per row of the y axis |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| |
| // Compute DFT N = 2 |
| DFT_2(data1, data2); |
| |
| // Store two complex output values |
| vstore2(data1, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| } |
| |
| |
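| // On the contiguous axis an odd radix cannot be covered by one power-of-two vload, so |
| // the 3 complex values (6 scalars) are split into a vload4 plus a vload2 at complex |
| // offset 2; the radix-5 and radix-7 kernels below split as 8 + 2 and 8 + 4 + 2. |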
| __kernel void fft_radix_3_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 4) |
| data0 = vload4(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2, 0, 0)); |
| |
| |
| DFT_3(data0.s01, data0.s23, data1.s01); |
| |
| |
| vstore4(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_3_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); |
| |
| |
| DFT_3(data0, data1, data2); |
| |
| |
| vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); |
| } |
| |
| |
| __kernel void fft_radix_4_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 8) |
| data = vload8(0, (__global DATA_TYPE *)input.ptr); |
| |
| |
| DFT_4(data.s01, data.s23, data.s45, data.s67); |
| |
| |
| vstore8(data, 0, (__global DATA_TYPE *)output.ptr); |
| } |
| |
| |
| __kernel void fft_radix_4_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); |
| |
| |
| DFT_4(data0, data1, data2, data3); |
| |
| |
| vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); |
| vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); |
| } |
| |
| |
| __kernel void fft_radix_5_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 8) |
| data0 = vload8(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4, 0, 0)); |
| |
| |
| DFT_5(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01); |
| |
| |
| vstore8(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_5_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 0)); |
| |
| |
| DFT_5(data0, data1, data2, data3, data4); |
| |
| |
| vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); |
| vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); |
| vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); |
| } |
| |
| |
| __kernel void fft_radix_7_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 8) |
| data0 = vload8(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 4) |
| data1 = vload4(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 6, 0, 0)); |
| |
| |
| DFT_7(data0.s01, data0.s23, data0.s45, data0.s67, data1.s01, data1.s23, data2.s01); |
| |
| |
| vstore8(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore4(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4, 0, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_7_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6, 0)); |
| |
| |
| DFT_7(data0, data1, data2, data3, data4, data5, data6); |
| |
| |
| vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); |
| vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); |
| vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); |
| vstore2(data5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5, 0)); |
| vstore2(data6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6, 0)); |
| } |
| |
| |
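| // Radix 8 on the contiguous axis maps exactly onto one 16-wide vector: 8 complex |
| // values are 16 scalars, loaded with a single vload16 and sliced with hexadecimal |
| // swizzles (.s89, .sAB, .sCD and .sEF address components 8..15). |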
| __kernel void fft_radix_8_first_stage_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 16) |
| data = vload16(0, (__global DATA_TYPE *)input.ptr); |
| |
| |
| DFT_8(data.s01, data.s23, data.s45, data.s67, data.s89, data.sAB, data.sCD, data.sEF); |
| |
| |
| vstore16(data, 0, (__global DATA_TYPE *)output.ptr); |
| } |
| |
| |
| __kernel void fft_radix_8_first_stage_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| ) |
| { |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT(input); |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT(output); |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 1, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| data7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 7, 0)); |
| |
| |
| DFT_8(data0, data1, data2, data3, data4, data5, data6, data7); |
| |
| |
| vstore2(data0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(data1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 1, 0)); |
| vstore2(data2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2, 0)); |
| vstore2(data3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3, 0)); |
| vstore2(data4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4, 0)); |
| vstore2(data5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5, 0)); |
| vstore2(data6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6, 0)); |
| vstore2(data7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 7, 0)); |
| } |
| |
| |
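| // Generic-stage kernels: every remaining kernel in this file computes one radix-R |
| // butterfly of a decimation-in-time stage per work item. With Nx the span produced by |
| // the previous stages and Ni the span after this stage (Ni = Nx * R under the usual |
| // Cooley-Tukey decomposition), the index math is |
| //   nx = kx % Nx;               position of the butterfly inside its span |
| //   n  = nx + (kx / Nx) * Ni;   index of the butterfly's first input |
| // and the R inputs sit Nx elements apart. Worked example for Nx = 4, R = 2 (Ni = 8): |
| // kx = 5 gives nx = 1 and n = 1 + 1 * 8 = 9, so the butterfly pairs elements 9 and 13. |
| // Input c_k is rotated by k * phi with phi = nx * exp_const; exp_const comes from the |
| // host and is assumed here to encode +/-2*pi/Ni, the sign selecting the transform |
| // direction. |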
| __kernel void fft_radix_2_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| // Each work-item computes a single radix-2 butterfly |
| uint kx = get_global_id(0); |
| |
| // Compute nx: position of this butterfly inside the current span of length Nx |
| uint nx = kx % Nx; |
| |
| // Compute n: index of the first input element of this butterfly |
| uint n = nx + (kx / Nx) * Ni; |
| |
| // Get tensor pointers |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| // Load two complex input values, Nx elements apart |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| |
| // Compute phi |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| // Multiply c1 by the twiddle factor, a rotation by phi in the complex plane |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| |
| // Compute DFT N = 2 |
| DFT_2(c0, c1); |
| |
| // Store two complex output values |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_2_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| |
| |
| DFT_2(c0, c1); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| } |
| |
| |
| __kernel void fft_radix_3_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(0); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| |
| |
| DFT_3(c0, c1, c2); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_3_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| |
| |
| DFT_3(c0, c1, c2); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); |
| } |
| |
| |
| __kernel void fft_radix_4_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(0); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| |
| |
| DFT_4(c0, c1, c2, c3); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_4_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| |
| |
| DFT_4(c0, c1, c2, c3); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); |
| } |
| |
| |
| __kernel void fft_radix_5_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(0); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| |
| |
| DFT_5(c0, c1, c2, c3, c4); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_5_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| |
| |
| DFT_5(c0, c1, c2, c3, c4); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); |
| } |
| |
| |
| __kernel void fft_radix_7_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(0); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 5 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 6 * Nx, 0, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5); |
| TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6); |
| |
| |
| DFT_7(c0, c1, c2, c3, c4, c5, c6); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); |
| vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 5 * Nx, 0, 0)); |
| vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6 * Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_7_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6 * Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5); |
| TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6); |
| |
| |
| DFT_7(c0, c1, c2, c3, c4, c5, c6); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); |
| vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5 * Nx, 0)); |
| vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6 * Nx, 0)); |
| } |
| |
| |
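| // Radix-8 generic stage: seven twiddle rotations (k * phi, k = 1..7) feed DFT_8. This |
| // is the widest butterfly in the file and keeps eight 2-component values plus the |
| // DFT_8 temporaries live per work item; the likely intent is to trade register |
| // pressure for fewer stages (and fewer kernel launches) for a given transform size. |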
| __kernel void fft_radix_8_axis_0( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(0); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += n * input.stride_x + get_global_id(1) * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += n * output.stride_x + get_global_id(1) * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 2 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 3 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 4 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 5 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 6 * Nx, 0, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 7 * Nx, 0, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5); |
| TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6); |
| TWIDDLE_FACTOR_MULTIPLICATION(7 * phi, c7); |
| |
| |
| DFT_8(c0, c1, c2, c3, c4, c5, c6, c7); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, Nx, 0, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 2 * Nx, 0, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 3 * Nx, 0, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 4 * Nx, 0, 0)); |
| vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 5 * Nx, 0, 0)); |
| vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 6 * Nx, 0, 0)); |
| vstore2(c7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 7 * Nx, 0, 0)); |
| } |
| |
| |
| __kernel void fft_radix_8_axis_1( |
| TENSOR3D_DECLARATION(input) |
| #ifndef IN_PLACE |
| , |
| TENSOR3D_DECLARATION(output) |
| #endif |
| , |
| uint Nx, uint Ni, float exp_const) |
| { |
| |
| uint kx = get_global_id(1); |
| |
| |
| uint nx = kx % Nx; |
| |
| |
| uint n = nx + (kx / Nx) * Ni; |
| |
| |
| Tensor3D input = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(input); |
| input.ptr += get_global_id(0) * input.stride_x + n * input.stride_y + get_global_id(2) * input.stride_z; |
| #ifdef IN_PLACE |
| Tensor3D output = input; |
| #else |
| Tensor3D output = CONVERT_TO_TENSOR3D_STRUCT_NO_STEP(output); |
| output.ptr += get_global_id(0) * output.stride_x + n * output.stride_y + get_global_id(2) * output.stride_z; |
| #endif |
| |
| |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c0 = vload2(0, (__global DATA_TYPE *)input.ptr); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c1 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c2 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 2 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c3 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 3 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c4 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 4 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c5 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 5 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c6 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 6 * Nx, 0)); |
| VEC_DATA_TYPE(DATA_TYPE, 2) |
| c7 = vload2(0, (__global DATA_TYPE *)tensor3D_offset(&input, 0, 7 * Nx, 0)); |
| |
| |
| DATA_TYPE phi = (DATA_TYPE)nx * (DATA_TYPE)exp_const; |
| |
| |
| TWIDDLE_FACTOR_MULTIPLICATION(phi, c1); |
| TWIDDLE_FACTOR_MULTIPLICATION(2 * phi, c2); |
| TWIDDLE_FACTOR_MULTIPLICATION(3 * phi, c3); |
| TWIDDLE_FACTOR_MULTIPLICATION(4 * phi, c4); |
| TWIDDLE_FACTOR_MULTIPLICATION(5 * phi, c5); |
| TWIDDLE_FACTOR_MULTIPLICATION(6 * phi, c6); |
| TWIDDLE_FACTOR_MULTIPLICATION(7 * phi, c7); |
| |
| |
| DFT_8(c0, c1, c2, c3, c4, c5, c6, c7); |
| |
| |
| vstore2(c0, 0, (__global DATA_TYPE *)output.ptr); |
| vstore2(c1, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, Nx, 0)); |
| vstore2(c2, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 2 * Nx, 0)); |
| vstore2(c3, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 3 * Nx, 0)); |
| vstore2(c4, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 4 * Nx, 0)); |
| vstore2(c5, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 5 * Nx, 0)); |
| vstore2(c6, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 6 * Nx, 0)); |
| vstore2(c7, 0, (__global DATA_TYPE *)tensor3D_offset(&output, 0, 7 * Nx, 0)); |
| } |
| #endif )" |