//===-- sanitizer_syscall_linux_aarch64.inc --------------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// Implementations of internal_syscall and internal_iserror for Linux/aarch64.
//
//===----------------------------------------------------------------------===//

#define SYSCALL(name) __NR_ ## name

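// On AArch64 Linux the syscall number is passed in x8, up to six arguments go
// in x0..x5, the kernel is entered with "svc 0", and the result is returned
// in x0. Each overload below pins its operands to those registers explicitly.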
static uptr __internal_syscall(u64 nr) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0");
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall0(n) \
  (__internal_syscall)(n)

static uptr __internal_syscall(u64 nr, u64 arg1) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall1(n, a1) \
  (__internal_syscall)(n, (u64)(a1))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall2(n, a1, a2) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall3(n, a1, a2, a3) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall4(n, a1, a2, a3, a4) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4, long arg5) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  register u64 x4 asm("x4") = arg5;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall5(n, a1, a2, a3, a4, a5) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5))

static uptr __internal_syscall(u64 nr, u64 arg1, long arg2, long arg3,
                               u64 arg4, long arg5, long arg6) {
  register u64 x8 asm("x8") = nr;
  register u64 x0 asm("x0") = arg1;
  register u64 x1 asm("x1") = arg2;
  register u64 x2 asm("x2") = arg3;
  register u64 x3 asm("x3") = arg4;
  register u64 x4 asm("x4") = arg5;
  register u64 x5 asm("x5") = arg6;
  asm volatile("svc 0"
               : "=r"(x0)
               : "r"(x8), "0"(x0), "r"(x1), "r"(x2), "r"(x3), "r"(x4), "r"(x5)
               : "memory", "cc");
  return x0;
}
#define __internal_syscall6(n, a1, a2, a3, a4, a5, a6) \
  (__internal_syscall)(n, (u64)(a1), (long)(a2), (long)(a3), (long)(a4), \
                       (u64)(a5), (long)(a6))

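// __SYSCALL_NARGS yields the number of arguments that follow the syscall
// number, and __SYSCALL_DISP pastes that count onto a base name to select the
// matching wrapper macro above. For example, internal_syscall(SYSCALL(close),
// fd) expands to __internal_syscall1(__NR_close, fd), which calls the
// one-argument overload.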
#define __SYSCALL_NARGS_X(a1, a2, a3, a4, a5, a6, a7, a8, n, ...) n
#define __SYSCALL_NARGS(...) \
  __SYSCALL_NARGS_X(__VA_ARGS__, 7, 6, 5, 4, 3, 2, 1, 0, )
#define __SYSCALL_CONCAT_X(a, b) a##b
#define __SYSCALL_CONCAT(a, b) __SYSCALL_CONCAT_X(a, b)
#define __SYSCALL_DISP(b, ...) \
  __SYSCALL_CONCAT(b, __SYSCALL_NARGS(__VA_ARGS__))(__VA_ARGS__)

#define internal_syscall(...) __SYSCALL_DISP(__internal_syscall, __VA_ARGS__)

#define internal_syscall_ptr internal_syscall
#define internal_syscall64 internal_syscall
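// internal_syscall_ptr and internal_syscall64 are plain aliases here: on
// aarch64, pointers and 64-bit values each fit in a single register, so no
// separate calling sequence is needed.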

// Helper function used to avoid clobbering errno. The kernel signals failure
// by returning -errno, i.e. a value in the range [-4095, -1].
bool internal_iserror(uptr retval, int *rverrno) {
  if (retval >= (uptr)-4095) {
    if (rverrno)
      *rverrno = -retval;
    return true;
  }
  return false;
}
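
// Illustrative usage (not part of the original file): callers issue the raw
// syscall and then classify the result with internal_iserror. The wrapper
// below is a hypothetical sketch; the name example_close and the use of
// __NR_close are assumptions for illustration only.
//
//   static int example_close(int fd) {
//     uptr res = internal_syscall(SYSCALL(close), fd);
//     int err;
//     if (internal_iserror(res, &err))
//       return -err;  // failure: err holds the errno value from the kernel
//     return 0;       // success
//   }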