author     Kelvin M. Klann <kmk3.code@protonmail.com>  2023-02-19 17:57:48 -0300
committer  Kelvin M. Klann <kmk3.code@protonmail.com>  2023-02-20 18:02:37 -0300
commit     ab1f8af8d0ac1ff8466d8758dc11d06dbfe8df25 (patch)
tree       7ff503cf1ad951be3c2dc88caea1f4e1d6c70699 /src/include
parent     *.sh: use consistent indentation (diff)
download   firejail-ab1f8af8d0ac1ff8466d8758dc11d06dbfe8df25.tar.gz
           firejail-ab1f8af8d0ac1ff8466d8758dc11d06dbfe8df25.tar.zst
           firejail-ab1f8af8d0ac1ff8466d8758dc11d06dbfe8df25.zip
Fix inconsistent leading spaces
Changes:

* Fix spaces being used for indentation in some lines in C
* Remove leading spaces before some goto labels
* Remove leading spaces before the start of some multiline comments
* Change leading spaces to tabs in some multiline macros
* Add missing asterisk to some multiline comments (to match other
  multiline comments and because they are false positives in the
  commands below)

Note: Leading spaces can be used for alignment (such as in function
parameters and function arguments in C) and for line continuation (such
as in long commands in shell scripts). However, in the above changes
the leading spaces are used for other reasons and do not seem to fit
with the style used.

Commands used to search for errors:

    $ git grep -In '^ [^*]' | grep -E -v \
        -e '(COPYING|README|RELNOTES|configure(.ac)?):' \
        -e '^[^:]+.(md|yml|py):' -e '(bash|zsh)_completion/' \
        -e '^contrib/syntax/' -e '^etc/templates/.*\.txt:' -e '^m4/' \
        -e '^platform/debian/' -e '^src/man/.*\.txt:' \
        -e '.*mkrpm.sh:' -e '.*extract_errnos.sh:'
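For illustration only, a short hypothetical C snippet in the style the commit enforces; the macro, function, and label names are made up and do not come from the firejail sources. It shows tab indentation on macro continuation lines, goto labels starting in column 1, and a leading asterisk on every line of a multiline comment.

#include <stddef.h>

/*
 * Hypothetical example (names made up, not from the firejail sources)
 * of the style the commit enforces: tab indentation, goto labels in
 * column 1, and a leading asterisk on every multiline-comment line.
 */
#define EXAMPLE_CHECK(ptr) \
	do { \
		if ((ptr) == NULL) \
			goto errexit; \
	} while (0)

static int example(const char *name) {
	/*
	 * Multiline comments keep the leading asterisk on every line,
	 * including continuation lines such as this one.
	 */
	EXAMPLE_CHECK(name);
	return 0;

errexit:
	return -1;
}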
Diffstat (limited to 'src/include')
-rw-r--r--  src/include/gcov_wrapper.h   4
-rw-r--r--  src/include/seccomp.h       24
2 files changed, 14 insertions, 14 deletions
diff --git a/src/include/gcov_wrapper.h b/src/include/gcov_wrapper.h
index b56b4e736..7c8f89d59 100644
--- a/src/include/gcov_wrapper.h
+++ b/src/include/gcov_wrapper.h
@@ -33,8 +33,8 @@
  */
 #if __GNUC__ > 11 || (__GNUC__ == 11 && __GNUC_MINOR__ >= 1)
 static void __gcov_flush(void) {
-    __gcov_dump();
-    __gcov_reset();
+	__gcov_dump();
+	__gcov_reset();
 }
 #endif
 #else
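For context (not part of this commit): the header patched above provides a __gcov_flush() shim on GCC 11.1 and later, where that symbol was dropped in favor of __gcov_dump() and __gcov_reset(). A minimal, hypothetical call site could look like the following; exec_with_coverage() is made up for illustration.

#include <unistd.h>

#include "gcov_wrapper.h"	/* defines __gcov_flush() for GCC >= 11.1 */

/*
 * Hypothetical call site (not from the repository): in a gcov coverage
 * build, flush the coverage counters before the process image is
 * replaced, so the data collected so far is not lost across execv().
 */
static void exec_with_coverage(const char *path, char *const argv[]) {
	__gcov_flush();
	execv(path, argv);
	_exit(1);	/* only reached if execv() fails */
}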
diff --git a/src/include/seccomp.h b/src/include/seccomp.h
index ef3fe6c3a..fc07b75ce 100644
--- a/src/include/seccomp.h
+++ b/src/include/seccomp.h
@@ -194,24 +194,24 @@
 #endif
 
 #define VALIDATE_ARCHITECTURE \
-    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
-    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, 1, 0), \
-    BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
+	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, 1, 0), \
+	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
 
 #define VALIDATE_ARCHITECTURE_KILL \
-    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
-    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, 1, 0), \
-    KILL_OR_RETURN_ERRNO
+	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_NR, 1, 0), \
+	KILL_OR_RETURN_ERRNO
 
 #define VALIDATE_ARCHITECTURE_64 \
-    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
-    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_64, 1, 0), \
-    BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
+	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_64, 1, 0), \
+	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
 
 #define VALIDATE_ARCHITECTURE_32 \
-    BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
-    BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_32, 1, 0), \
-    BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
+	BPF_STMT(BPF_LD+BPF_W+BPF_ABS, (offsetof(struct seccomp_data, arch))), \
+	BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, ARCH_32, 1, 0), \
+	BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW)
 
 #ifndef X32_SYSCALL_BIT
 #define X32_SYSCALL_BIT 0x40000000
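For context (not part of this commit), a minimal, hypothetical sketch of how a macro like VALIDATE_ARCHITECTURE is conventionally placed at the head of a seccomp BPF program. It assumes the patched seccomp.h also defines ARCH_NR; the function name and the choice of __NR_mount are made up for illustration.

#include <stddef.h>
#include <stdio.h>
#include <linux/filter.h>
#include <linux/seccomp.h>
#include <sys/prctl.h>
#include <sys/syscall.h>

#include "seccomp.h"	/* assumed to provide VALIDATE_ARCHITECTURE and ARCH_NR */

/*
 * Hypothetical example (not from the repository): install a tiny
 * filter that kills the process on mount(2) and allows everything
 * else. VALIDATE_ARCHITECTURE sits at the head of the program; as
 * defined in the hunk above, it lets syscalls made under a
 * non-matching architecture pass (SECCOMP_RET_ALLOW) instead of
 * filtering them.
 */
static int install_example_filter(void) {
	struct sock_filter filter[] = {
		VALIDATE_ARCHITECTURE,
		/* load the syscall number */
		BPF_STMT(BPF_LD+BPF_W+BPF_ABS, offsetof(struct seccomp_data, nr)),
		/* if nr == __NR_mount fall through to KILL, else skip to ALLOW */
		BPF_JUMP(BPF_JMP+BPF_JEQ+BPF_K, __NR_mount, 0, 1),
		BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_KILL),
		BPF_STMT(BPF_RET+BPF_K, SECCOMP_RET_ALLOW),
	};
	struct sock_fprog prog = {
		.len = (unsigned short) (sizeof(filter) / sizeof(filter[0])),
		.filter = filter,
	};

	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) {
		perror("prctl(PR_SET_NO_NEW_PRIVS)");
		return 1;
	}
	if (prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog)) {
		perror("prctl(PR_SET_SECCOMP)");
		return 1;
	}
	return 0;
}

VALIDATE_ARCHITECTURE_KILL, shown in the same hunk, is the variant to use when a non-matching architecture should hit KILL_OR_RETURN_ERRNO instead of being allowed.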