/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * AARCH64 specific definitions for NOLIBC
 * Copyright (C) 2017-2022 Willy Tarreau <[email protected]>
 */

#ifndef _NOLIBC_ARCH_AARCH64_H
#define _NOLIBC_ARCH_AARCH64_H

/* O_* macros for fcntl/open are architecture-specific */
#define O_RDONLY            0
#define O_WRONLY            1
#define O_RDWR              2
#define O_CREAT          0x40
#define O_EXCL           0x80
#define O_NOCTTY        0x100
#define O_TRUNC         0x200
#define O_APPEND        0x400
#define O_NONBLOCK      0x800
#define O_DIRECTORY    0x4000
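
/*
 * Illustrative sketch, not part of the original header: these flags are
 * combined with bitwise OR as usual. The helper name below is hypothetical
 * and only demonstrates a typical "create or truncate for writing" mode.
 */
static __attribute__((unused))
int example_rw_create_flags(void)
{
        /* O_WRONLY | O_CREAT | O_TRUNC == 0x241 with the values above */
        return O_WRONLY | O_CREAT | O_TRUNC;
}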

/* The struct returned by the newfstatat() syscall. It differs slightly from
 * x86_64's stat struct in field ordering, so be careful. A usage sketch
 * appears near the end of this file.
 */
struct sys_stat_struct {
        unsigned long   st_dev;
        unsigned long   st_ino;
        unsigned int    st_mode;
        unsigned int    st_nlink;
        unsigned int    st_uid;
        unsigned int    st_gid;

        unsigned long   st_rdev;
        unsigned long   __pad1;
        long            st_size;
        int             st_blksize;
        int             __pad2;

        long            st_blocks;
        long            st_atime;
        unsigned long   st_atime_nsec;
        long            st_mtime;

        unsigned long   st_mtime_nsec;
        long            st_ctime;
        unsigned long   st_ctime_nsec;
        unsigned int    __unused[2];
};

/* Syscalls for AARCH64:
 *   - registers are 64-bit
 *   - stack is 16-byte aligned
 *   - syscall number is passed in x8
 *   - arguments are in x0, x1, x2, x3, x4, x5
 *   - the system call is performed by calling "svc #0"
 *   - the syscall return value comes back in x0
 *   - the arguments are cast to long and assigned into the target registers
 *     which are then simply passed as registers to the asm code, so that we
 *     don't have to experience issues with register constraints.
 *
 * On aarch64, select() is not implemented, so we have to use pselect6()
 * instead. A short usage sketch of these macros follows their definitions
 * below.
 */
#define __ARCH_WANT_SYS_PSELECT6

#define my_syscall0(num)                                                      \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0");                                   \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall1(num, arg1)                                                \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1),                                                 \
                  "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall2(num, arg1, arg2)                                          \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
        register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1), "r"(_arg2),                                     \
                  "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall3(num, arg1, arg2, arg3)                                    \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
        register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
        register long _arg3 __asm__ ("x2") = (long)(arg3);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3),                         \
                  "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4)                              \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
        register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
        register long _arg3 __asm__ ("x2") = (long)(arg3);                    \
        register long _arg4 __asm__ ("x3") = (long)(arg4);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4),             \
                  "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5)                        \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
        register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
        register long _arg3 __asm__ ("x2") = (long)(arg3);                    \
        register long _arg4 __asm__ ("x3") = (long)(arg4);                    \
        register long _arg5 __asm__ ("x4") = (long)(arg5);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
                  "r"(_num)                                                   \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6)                  \
({                                                                            \
        register long _num  __asm__ ("x8") = (num);                           \
        register long _arg1 __asm__ ("x0") = (long)(arg1);                    \
        register long _arg2 __asm__ ("x1") = (long)(arg2);                    \
        register long _arg3 __asm__ ("x2") = (long)(arg3);                    \
        register long _arg4 __asm__ ("x3") = (long)(arg4);                    \
        register long _arg5 __asm__ ("x4") = (long)(arg5);                    \
        register long _arg6 __asm__ ("x5") = (long)(arg6);                    \
                                                                              \
        __asm__ volatile (                                                    \
                "svc #0\n"                                                    \
                : "=r"(_arg1)                                                 \
                : "r"(_arg1), "r"(_arg2), "r"(_arg3), "r"(_arg4), "r"(_arg5), \
                  "r"(_arg6), "r"(_num)                                       \
                : "memory", "cc"                                              \
        );                                                                    \
        _arg1;                                                                \
})
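
/*
 * Illustrative usage sketch, not part of the original header: issue a raw
 * write(2) on stdout through my_syscall3(). The syscall number 64 for
 * __NR_write on aarch64 is an assumption of this example, and the function
 * name is hypothetical. On error the kernel returns -errno in x0.
 */
static __attribute__((unused))
long example_write_stdout(const char *buf, unsigned long len)
{
        return my_syscall3(64, 1, buf, len);    /* write(1, buf, len) */
}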

/* startup code: at process entry the kernel leaves argc at [sp], the argv[]
 * pointers right above it, a NULL, then the envp[] pointers, another NULL,
 * and finally the auxiliary vector.
 */
__asm__ (".section .text\n"
    ".weak _start\n"
    "_start:\n"
    "ldr x0, [sp]\n"              // argc (x0) was in the stack
    "add x1, sp, 8\n"             // argv (x1) = sp + 8
    "lsl x2, x0, 3\n"             // envp (x2) = 8*argc ...
    "add x2, x2, 8\n"             //           + 8 (skip null)
    "add x2, x2, x1\n"            //           + argv
    "and sp, x1, -16\n"           // sp must be 16-byte aligned in the callee
    "bl main\n"                   // main() returns the status code, we'll exit with it.
    "mov x8, 93\n"                // NR_exit == 93
    "svc #0\n"
    "");
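
/*
 * Illustrative usage sketch for struct sys_stat_struct above, not part of
 * the original header: fetch a file's size via the newfstatat() syscall.
 * The values 79 for __NR_newfstatat and -100 for AT_FDCWD on aarch64, and
 * the function name, are assumptions of this example. A negative return
 * value from the macro is the kernel's -errno.
 */
static __attribute__((unused))
long example_file_size(const char *path)
{
        struct sys_stat_struct st;
        long ret = my_syscall4(79, -100, path, &st, 0);

        if (ret < 0)
                return ret;
        return st.st_size;
}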

#endif // _NOLIBC_ARCH_AARCH64_H