/*
 * Bind `var` to argument registers w6/w7 (AAPCS64 integer args 7 and 8).
 * NOTE(review): int64_t bound to a 32-bit w-register view — confirm this
 * shouldn't be asm("x6")/asm("x7") or a 32-bit type; GCC may reject or
 * truncate a 64-bit local register variable placed in wN.
 */
#define SHIVA_T_PAIR_W6(var) register int64_t var asm("w6");
#define SHIVA_T_PAIR_W7(var) register int64_t var asm("w7");
/*
 * XXX
 * The following macros use register x9. This means you can't use
 * more than one macro at a time without clobbering x9.
 *
 * In the future: Allow the developer to select the register as it
 * greatly depends on the function being transformed.
 */
/*
 * SHIVA_T_PAIR_BP_N(var): load the 64-bit value at frame-pointer
 * offset N (i.e. [x29, #N]) into x9 and bind it to `var`.
 * Offsets cover every 8-byte stack slot from 16 through 96.
 */
#define SHIVA_T_PAIR_BP_16(var) asm volatile ("ldr x9, [x29, #16]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_24(var) asm volatile ("ldr x9, [x29, #24]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_32(var) asm volatile ("ldr x9, [x29, #32]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_40(var) asm volatile ("ldr x9, [x29, #40]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_48(var) asm volatile ("ldr x9, [x29, #48]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_56(var) asm volatile ("ldr x9, [x29, #56]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_64(var) asm volatile ("ldr x9, [x29, #64]"); \
	register int64_t var asm("x9");
/* Fixed: was "ldr x9, [x29, #64" — wrong offset (copy-paste of BP_64)
 * and missing the closing ']' in the asm string. */
#define SHIVA_T_PAIR_BP_72(var) asm volatile ("ldr x9, [x29, #72]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_80(var) asm volatile ("ldr x9, [x29, #80]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_88(var) asm volatile ("ldr x9, [x29, #88]"); \
	register int64_t var asm("x9");
#define SHIVA_T_PAIR_BP_96(var) asm volatile ("ldr x9, [x29, #96]"); \
	register int64_t var asm("x9");
|
/*
 * SHIVA_T_LEA_BP_N(var): compute the ADDRESS x29 + N into x9 and bind
 * it to `var` (address-of a frame slot, unlike SHIVA_T_PAIR_BP_N which
 * loads the value stored there). Same x9-clobber caveat as above.
 */
#define SHIVA_T_LEA_BP_16(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #16"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_24(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #24"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_32(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #32"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_40(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #40"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_48(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #48"); \
	register int64_t var asm("x9");

/* Fixed: added #54 instead of #56 (typo'd offset). */
#define SHIVA_T_LEA_BP_56(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #56"); \
	register int64_t var asm("x9");

/* Added 64/72/80/88 to complete the 8-byte series and match the
 * SHIVA_T_PAIR_BP_* coverage (previously jumped from 56 to 96). */
#define SHIVA_T_LEA_BP_64(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #64"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_72(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #72"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_80(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #80"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_88(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #88"); \
	register int64_t var asm("x9");

#define SHIVA_T_LEA_BP_96(var) asm volatile ("mov x9, x29\n" \
					     "add x9, x9, #96"); \
	register int64_t var asm("x9");
/* Symbol-name prefix used by the helper-call machinery below to refer
 * to the original (untransformed) function. */
#define SHIVA_HELPER_CALL_EXTERNAL_ID "__shiva_helper_orig_func_"
50 | 98 |
|
51 | 99 | #define SHIVA_HELPER_CALL_EXTERNAL(name) \ |
|
0 commit comments