1 | /** |
2 | * Seccomp BPF Translator | |
3 | * | |
4 | * Copyright (c) 2012 Red Hat <pmoore@redhat.com> | |
5 | * Author: Paul Moore <paul@paul-moore.com> | |
6 | */ | |
7 | ||
8 | /* | |
9 | * This library is free software; you can redistribute it and/or modify it | |
10 | * under the terms of version 2.1 of the GNU Lesser General Public License as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This library is distributed in the hope that it will be useful, but WITHOUT | |
14 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
15 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU Lesser General Public License | |
19 | * along with this library; if not, see <http://www.gnu.org/licenses>. | |
20 | */ | |
21 | ||
22 | #include <errno.h> | |
23 | #include <inttypes.h> | |
24 | #include <stdlib.h> | |
25 | #include <stdio.h> | |
26 | #include <string.h> | |
27 | #include <stdbool.h> | |
28 | ||
29 | #ifndef _BSD_SOURCE | |
30 | #define _BSD_SOURCE | |
31 | #endif | |
32 | #include <endian.h> | |
33 | ||
34 | #include <seccomp.h> | |
35 | ||
36 | #include "arch.h" | |
37 | #include "arch-x32.h" | |
38 | #include "gen_bpf.h" | |
39 | #include "db.h" | |
40 | #include "hash.h" | |
41 | #include "system.h" | |
42 | ||
43 | /* allocation increments */ | |
44 | #define AINC_BLK 2 | |
45 | #define AINC_PROG 64 | |
46 | ||
47 | struct acc_state { | |
48 | int32_t offset; | |
49 | uint32_t mask; | |
50 | }; | |
51 | #define _ACC_STATE(x,y) \ | |
52 | (struct acc_state){ .offset = (x), .mask = (y) } | |
53 | #define _ACC_STATE_OFFSET(x) \ | |
54 | _ACC_STATE(x,ARG_MASK_MAX) | |
55 | #define _ACC_STATE_UNDEF \ | |
56 | _ACC_STATE_OFFSET(-1) | |
57 | #define _ACC_CMP_EQ(x,y) \ | |
58 | ((x).offset == (y).offset && (x).mask == (y).mask) | |
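/*
 * NOTE: the accumulator state tracks which seccomp_data offset is currently
 * loaded into the BPF accumulator and which AND-mask has been applied to it,
 * allowing later filter nodes to skip redundant BPF_LD/BPF_AND instructions.
 * A rough sketch of how the macros above compose (illustrative values only):
 *
 *	struct acc_state a = _ACC_STATE_OFFSET(8);	// offset 8, full mask
 *	struct acc_state b = _ACC_STATE(8, 0xffff);	// offset 8, low 16 bits
 *	if (!_ACC_CMP_EQ(a, b))
 *		;	// masks differ => a BPF_AND would need to be emitted
 */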
59 | ||
60 | enum bpf_jump_type { | |
61 | TGT_NONE = 0, | |
62 | TGT_K, /* immediate "k" value */ | |
63 | TGT_NXT, /* fall through to the next block */ | |
64 | TGT_IMM, /* resolved immediate value */ | |
65 | TGT_PTR_DB, /* pointer to part of the filter db */ | |
66 | TGT_PTR_BLK, /* pointer to an instruction block */ | |
67 | TGT_PTR_HSH, /* pointer to a block hash table */ | |
68 | }; | |
69 | ||
70 | struct bpf_jump { | |
71 | union { | |
72 | uint8_t imm_j; | |
73 | uint32_t imm_k; | |
74 | uint64_t hash; | |
75 | struct db_arg_chain_tree *db; | |
76 | struct bpf_blk *blk; | |
77 | unsigned int nxt; | |
78 | } tgt; | |
79 | enum bpf_jump_type type; | |
80 | }; | |
81 | #define _BPF_OP(a,x) \ | |
82 | (_htot16(a,x)) | |
83 | #define _BPF_JMP_NO \ | |
84 | ((struct bpf_jump) { .type = TGT_NONE }) | |
85 | #define _BPF_JMP_NXT(x) \ | |
86 | ((struct bpf_jump) { .type = TGT_NXT, .tgt = { .nxt = (x) } }) | |
87 | #define _BPF_JMP_IMM(x) \ | |
88 | ((struct bpf_jump) { .type = TGT_IMM, .tgt = { .imm_j = (x) } }) | |
89 | #define _BPF_JMP_DB(x) \ | |
90 | ((struct bpf_jump) { .type = TGT_PTR_DB, .tgt = { .db = (x) } }) | |
91 | #define _BPF_JMP_BLK(x) \ | |
92 | ((struct bpf_jump) { .type = TGT_PTR_BLK, .tgt = { .blk = (x) } }) | |
93 | #define _BPF_JMP_HSH(x) \ | |
94 | ((struct bpf_jump) { .type = TGT_PTR_HSH, .tgt = { .hash = (x) } }) | |
95 | #define _BPF_K(a,x) \ | |
96 | ((struct bpf_jump) { .type = TGT_K, .tgt = { .imm_k = _htot32(a,x) } }) | |
97 | #define _BPF_JMP_MAX 255 | |
98 | #define _BPF_JMP_MAX_RET 255 | |
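/*
 * NOTE: classic BPF stores the jt/jf jump offsets in 8-bit fields, which is
 * why _BPF_JMP_MAX and _BPF_JMP_MAX_RET are capped at 255; when a target is
 * further away, _gen_bpf_build_jmp() below splices in a BPF_JA long jump.
 * The macros above wrap the different jump target kinds, e.g. (illustrative
 * sketch only):
 *
 *	instr.jt = _BPF_JMP_HSH(blk->hash);	// resolve via the hash table
 *	instr.jf = _BPF_JMP_NXT(0);		// fall through to the next block
 */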
99 | ||
100 | struct bpf_instr { | |
101 | uint16_t op; | |
102 | struct bpf_jump jt; | |
103 | struct bpf_jump jf; | |
104 | struct bpf_jump k; | |
105 | }; | |
106 | #define _BPF_OFFSET_SYSCALL (offsetof(struct seccomp_data, nr)) | |
107 | #define _BPF_SYSCALL(a) _BPF_K(a,_BPF_OFFSET_SYSCALL) | |
108 | #define _BPF_OFFSET_ARCH (offsetof(struct seccomp_data, arch)) | |
109 | #define _BPF_ARCH(a) _BPF_K(a,_BPF_OFFSET_ARCH) | |
110 | ||
111 | struct bpf_blk { | |
112 | /* bpf instructions */ | |
113 | struct bpf_instr *blks; | |
114 | unsigned int blk_cnt; | |
115 | unsigned int blk_alloc; | |
116 | ||
117 | /* accumulator state */ | |
118 | struct acc_state acc_start; | |
119 | struct acc_state acc_end; | |
120 | ||
121 | /* priority - higher is better */ | |
122 | unsigned int priority; | |
123 | ||
124 | /* status flags */ | |
125 | bool flag_hash; /* added to the hash table */ | |
126 | bool flag_dup; /* duplicate block and in use */ | |
127 | bool flag_unique; /* ->blks is unique to this block */ | |
128 | ||
129 | /* original db_arg_chain_tree node */ | |
130 | const struct db_arg_chain_tree *node; | |
131 | ||
132 | /* used during block assembly */ | |
133 | uint64_t hash; | |
134 | struct bpf_blk *hash_nxt; | |
135 | struct bpf_blk *prev, *next; | |
136 | struct bpf_blk *lvl_prv, *lvl_nxt; | |
137 | }; | |
138 | #define _BLK_MSZE(x) \ | |
139 | ((x)->blk_cnt * sizeof(*((x)->blks))) | |
140 | ||
141 | struct bpf_hash_bkt { | |
142 | struct bpf_blk *blk; | |
143 | struct bpf_hash_bkt *next; | |
144 | unsigned int found; | |
145 | }; | |
146 | ||
147 | #define _BPF_HASH_BITS 8 | |
148 | #define _BPF_HASH_SIZE (1 << _BPF_HASH_BITS) | |
149 | #define _BPF_HASH_MASK (_BPF_HASH_BITS - 1) | |
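/*
 * NOTE: instruction blocks are deduplicated through this table; the bucket is
 * selected by masking the 64-bit block hash, i.e.
 * state->htbl[h_val & _BPF_HASH_MASK], and entries within a bucket are
 * chained via ->next.
 */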
150 | struct bpf_state { | |
151 | /* block hash table */ | |
152 | struct bpf_hash_bkt *htbl[_BPF_HASH_SIZE]; | |
153 | ||
154 | /* filter attributes */ | |
155 | const struct db_filter_attr *attr; | |
156 | /* bad arch action */ | |
157 | uint64_t bad_arch_hsh; | |
158 | /* default action */ | |
159 | uint64_t def_hsh; | |
160 | ||
161 | /* target arch - NOTE: be careful, temporary use only! */ | |
162 | const struct arch_def *arch; | |
163 | ||
164 | /* bpf program */ | |
165 | struct bpf_program *bpf; | |
166 | }; | |
167 | ||
168 | /** | |
169 | * Populate a BPF instruction | |
170 | * @param _ins the BPF instruction | |
171 | * @param _op the BPF operand | |
172 | * @param _jt the BPF jt value | |
173 | * @param _jf the BPF jf value | |
174 | * @param _k the BPF k value | |
175 | * | |
176 | * Set the given values on the provided bpf_instr struct. | |
177 | * | |
178 | */ | |
179 | #define _BPF_INSTR(_ins,_op,_jt,_jf,_k) \ | |
180 | do { \ | |
181 | memset(&(_ins), 0, sizeof(_ins)); \ | |
182 | (_ins).op = (_op); \ | |
183 | (_ins).jt = _jt; \ | |
184 | (_ins).jf = _jf; \ | |
185 | (_ins).k = _k; \ | |
186 | } while (0) | |
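/*
 * NOTE: a typical use of _BPF_INSTR(), mirroring _gen_bpf_action() below,
 * builds an unconditional return for a filter action (SCMP_ACT_ALLOW is only
 * an example value here):
 *
 *	struct bpf_instr instr;
 *	_BPF_INSTR(instr, _BPF_OP(arch, BPF_RET),
 *		   _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(arch, SCMP_ACT_ALLOW));
 */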
187 | ||
188 | static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state, | |
189 | const struct db_sys_list *sys, | |
190 | const struct db_arg_chain_tree *chain, | |
191 | const struct bpf_jump *nxt_jump, | |
192 | struct acc_state *a_state); | |
193 | ||
194 | static struct bpf_blk *_hsh_remove(struct bpf_state *state, uint64_t h_val); | |
195 | static struct bpf_blk *_hsh_find(const struct bpf_state *state, uint64_t h_val); | |
196 | ||
197 | /** | |
198 | * Convert a 16-bit host integer into the target's endianness | |
199 | * @param arch the architecture definition | |
200 | * @param val the 16-bit integer | |
201 | * | |
202 | * Convert the endianness of the supplied value and return it to the caller. | |
203 | * | |
204 | */ | |
205 | uint16_t _htot16(const struct arch_def *arch, uint16_t val) | |
206 | { | |
207 | if (arch->endian == ARCH_ENDIAN_LITTLE) | |
208 | return htole16(val); | |
209 | else | |
210 | return htobe16(val); | |
211 | } | |
212 | ||
213 | /** | |
214 | * Convert a 32-bit host integer into the target's endianness | |
215 | * @param arch the architecture definition | |
216 | * @param val the 32-bit integer | |
217 | * | |
218 | * Convert the endianness of the supplied value and return it to the caller. | |
219 | * | |
220 | */ | |
221 | uint32_t _htot32(const struct arch_def *arch, uint32_t val) | |
222 | { | |
223 | if (arch->endian == ARCH_ENDIAN_LITTLE) | |
224 | return htole32(val); | |
225 | else | |
226 | return htobe32(val); | |
227 | } | |
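/*
 * NOTE: a worked example of the conversion above - when generating a filter
 * for a big-endian target (e.g. MIPS) on a little-endian host,
 * _htot32(arch, 0x00000001) returns 0x01000000, so the stored constant
 * matches the byte order the target kernel expects.
 */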
228 | ||
229 | /** | |
230 | * Free the BPF instruction block | |
231 | * @param state the BPF state | |
232 | * @param blk the BPF instruction block | |
233 | * | |
234 | * Free the BPF instruction block; any linked blocks are preserved and the hash | |
235 | * table is not modified. In general, you probably want to use _blk_free() | |
236 | * instead. | |
237 | * | |
238 | */ | |
239 | static void __blk_free(struct bpf_state *state, struct bpf_blk *blk) | |
240 | { | |
241 | struct bpf_blk *b_tmp; | |
242 | ||
243 | while (blk->hash_nxt != NULL) { | |
244 | b_tmp = blk->hash_nxt; | |
245 | blk->hash_nxt = b_tmp->hash_nxt; | |
246 | if (!b_tmp->flag_dup) | |
247 | free(b_tmp); | |
248 | } | |
249 | if (blk->blks != NULL && blk->flag_unique) | |
250 | free(blk->blks); | |
251 | free(blk); | |
252 | } | |
253 | ||
254 | /** | |
255 | * Free the BPF instruction block | |
256 | * @param state the BPF state | |
257 | * @param blk the BPF instruction block | |
258 | * | |
259 | * Free the BPF instruction block including any linked blocks. The hash table | |
260 | * is updated to reflect the newly removed block(s). | |
261 | * | |
262 | */ | |
263 | static void _blk_free(struct bpf_state *state, struct bpf_blk *blk) | |
264 | { | |
265 | int iter; | |
266 | struct bpf_blk *b_iter; | |
267 | struct bpf_instr *i_iter; | |
268 | ||
269 | if (blk == NULL) | |
270 | return; | |
271 | ||
272 | /* remove this block from the hash table */ | |
273 | _hsh_remove(state, blk->hash); | |
274 | ||
275 | /* run through the block freeing TGT_PTR_{BLK,HSH} jump targets */ | |
276 | for (iter = 0; iter < blk->blk_cnt; iter++) { | |
277 | i_iter = &blk->blks[iter]; | |
278 | switch (i_iter->jt.type) { | |
279 | case TGT_PTR_BLK: | |
280 | _blk_free(state, i_iter->jt.tgt.blk); | |
281 | break; | |
282 | case TGT_PTR_HSH: | |
283 | b_iter = _hsh_find(state, i_iter->jt.tgt.hash); | |
284 | _blk_free(state, b_iter); | |
285 | break; | |
286 | default: | |
287 | /* do nothing */ | |
288 | break; | |
289 | } | |
290 | switch (i_iter->jf.type) { | |
291 | case TGT_PTR_BLK: | |
292 | _blk_free(state, i_iter->jf.tgt.blk); | |
293 | break; | |
294 | case TGT_PTR_HSH: | |
295 | b_iter = _hsh_find(state, i_iter->jf.tgt.hash); | |
296 | _blk_free(state, b_iter); | |
297 | break; | |
298 | default: | |
299 | /* do nothing */ | |
300 | break; | |
301 | } | |
302 | } | |
303 | __blk_free(state, blk); | |
304 | } | |
305 | ||
306 | /** | |
307 | * Allocate and initialize a new instruction block | |
308 | * | |
309 | * Allocate a new BPF instruction block and perform some very basic | |
310 | * initialization. Returns a pointer to the block on success, NULL on failure. | |
311 | * | |
312 | */ | |
313 | static struct bpf_blk *_blk_alloc(void) | |
314 | { | |
315 | struct bpf_blk *blk; | |
316 | ||
317 | blk = malloc(sizeof(*blk)); | |
318 | if (blk == NULL) | |
319 | return NULL; | |
320 | ||
321 | memset(blk, 0, sizeof(*blk)); | |
322 | blk->flag_unique = true; | |
323 | blk->acc_start = _ACC_STATE_UNDEF; | |
324 | blk->acc_end = _ACC_STATE_UNDEF; | |
325 | ||
326 | return blk; | |
327 | } | |
328 | ||
329 | /** | |
330 | * Resize an instruction block | |
331 | * @param state the BPF state | |
332 | * @param blk the existing instruction block, or NULL | |
333 | * @param size_add the minimum amount of instructions to add | |
334 | * | |
335 | * Resize the given instruction block such that it is at least as large as the | |
336 | * current size plus @size_add. Returns a pointer to the block on success, | |
337 | * NULL on failure. | |
338 | * | |
339 | */ | |
340 | static struct bpf_blk *_blk_resize(struct bpf_state *state, | |
341 | struct bpf_blk *blk, | |
342 | unsigned int size_add) | |
343 | { | |
344 | unsigned int size_adj = (AINC_BLK > size_add ? AINC_BLK : size_add); | |
345 | struct bpf_instr *new; | |
346 | ||
347 | if (blk == NULL) | |
348 | return NULL; | |
349 | ||
350 | if ((blk->blk_cnt + size_adj) <= blk->blk_alloc) | |
351 | return blk; | |
352 | ||
353 | blk->blk_alloc += size_adj; | |
354 | new = realloc(blk->blks, blk->blk_alloc * sizeof(*(blk->blks))); | |
355 | if (new == NULL) { | |
356 | _blk_free(state, blk); | |
357 | return NULL; | |
358 | } | |
359 | blk->blks = new; | |
360 | ||
361 | return blk; | |
362 | } | |
363 | ||
364 | /** | |
365 | * Append a new BPF instruction to an instruction block | |
366 | * @param state the BPF state | |
367 | * @param blk the existing instruction block, or NULL | |
368 | * @param instr the new instruction | |
369 | * | |
370 | * Add the new BPF instruction to the end of the given instruction block. If | |
371 | * the given instruction block is NULL, a new block will be allocated. Returns | |
372 | * a pointer to the block on success, NULL on failure, and in the case of | |
373 | * failure the instruction block is free'd. | |
374 | * | |
375 | */ | |
376 | static struct bpf_blk *_blk_append(struct bpf_state *state, | |
377 | struct bpf_blk *blk, | |
378 | const struct bpf_instr *instr) | |
379 | { | |
380 | if (blk == NULL) { | |
381 | blk = _blk_alloc(); | |
382 | if (blk == NULL) | |
383 | return NULL; | |
384 | } | |
385 | ||
386 | if (_blk_resize(state, blk, 1) == NULL) | |
387 | return NULL; | |
388 | memcpy(&blk->blks[blk->blk_cnt++], instr, sizeof(*instr)); | |
389 | ||
390 | return blk; | |
391 | } | |
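/*
 * NOTE: the usual idiom throughout this file is to let _blk_append() allocate
 * the block on first use and to treat a NULL return as fatal, e.g. (sketch):
 *
 *	blk = _blk_append(state, blk, &instr);
 *	if (blk == NULL)
 *		goto node_failure;
 */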
392 | ||
393 | /** | |
394 | * Prepend a new BPF instruction to an instruction block | |
395 | * @param state the BPF state | |
396 | * @param blk the existing instruction block, or NULL | |
397 | * @param instr the new instruction | |
398 | * | |
399 | * Add the new BPF instruction to the start of the given instruction block. | |
400 | * If the given instruction block is NULL, a new block will be allocated. | |
401 | * Returns a pointer to the block on success, NULL on failure, and in the case | |
402 | * of failure the instruction block is free'd. | |
403 | * | |
404 | */ | |
405 | static struct bpf_blk *_blk_prepend(struct bpf_state *state, | |
406 | struct bpf_blk *blk, | |
407 | const struct bpf_instr *instr) | |
408 | { | |
409 | /* empty - we can treat this like a normal append operation */ | |
410 | if (blk == NULL || blk->blk_cnt == 0) | |
411 | return _blk_append(state, blk, instr); | |
412 | ||
413 | if (_blk_resize(state, blk, 1) == NULL) | |
414 | return NULL; | |
415 | memmove(&blk->blks[1], &blk->blks[0], blk->blk_cnt++ * sizeof(*instr)); | |
416 | memcpy(&blk->blks[0], instr, sizeof(*instr)); | |
417 | ||
418 | return blk; | |
419 | } | |
420 | ||
421 | /** | |
422 | * Append a block of BPF instructions to the final BPF program | |
423 | * @param prg the BPF program | |
424 | * @param blk the BPF instruction block | |
425 | * | |
426 | * Add the BPF instruction block to the end of the BPF program and perform the | |
427 | * necessary translation. Returns the updated instruction count on success, | |
428 | * negative values on failure; in the case of failure the BPF program is free'd. | |
429 | * | |
430 | */ | |
431 | static int _bpf_append_blk(struct bpf_program *prg, const struct bpf_blk *blk) | |
432 | { | |
433 | int rc; | |
434 | bpf_instr_raw *i_new; | |
435 | bpf_instr_raw *i_iter; | |
436 | unsigned int old_cnt = prg->blk_cnt; | |
437 | unsigned int iter; | |
438 | ||
439 | /* (re)allocate the program memory */ | |
440 | prg->blk_cnt += blk->blk_cnt; | |
441 | i_new = realloc(prg->blks, BPF_PGM_SIZE(prg)); | |
442 | if (i_new == NULL) { | |
443 | rc = -ENOMEM; | |
444 | goto bpf_append_blk_failure; | |
445 | } | |
446 | prg->blks = i_new; | |
447 | ||
448 | /* transfer and translate the blocks to raw instructions */ | |
449 | for (iter = 0; iter < blk->blk_cnt; iter++) { | |
450 | i_iter = &(prg->blks[old_cnt + iter]); | |
451 | ||
452 | i_iter->code = blk->blks[iter].op; | |
453 | switch (blk->blks[iter].jt.type) { | |
454 | case TGT_NONE: | |
455 | i_iter->jt = 0; | |
456 | break; | |
457 | case TGT_IMM: | |
458 | /* jump to the value specified */ | |
459 | i_iter->jt = blk->blks[iter].jt.tgt.imm_j; | |
460 | break; | |
461 | default: | |
462 | /* fatal error - we should never get here */ | |
463 | rc = -EFAULT; | |
464 | goto bpf_append_blk_failure; | |
465 | } | |
466 | switch (blk->blks[iter].jf.type) { | |
467 | case TGT_NONE: | |
468 | i_iter->jf = 0; | |
469 | break; | |
470 | case TGT_IMM: | |
471 | /* jump to the value specified */ | |
472 | i_iter->jf = blk->blks[iter].jf.tgt.imm_j; | |
473 | break; | |
474 | default: | |
475 | /* fatal error - we should never get here */ | |
476 | rc = -EFAULT; | |
477 | goto bpf_append_blk_failure; | |
478 | } | |
479 | switch (blk->blks[iter].k.type) { | |
480 | case TGT_NONE: | |
481 | i_iter->k = 0; | |
482 | break; | |
483 | case TGT_K: | |
484 | i_iter->k = blk->blks[iter].k.tgt.imm_k; | |
485 | break; | |
486 | default: | |
487 | /* fatal error - we should never get here */ | |
488 | rc = -EFAULT; | |
489 | goto bpf_append_blk_failure; | |
490 | } | |
491 | } | |
492 | ||
493 | return prg->blk_cnt; | |
494 | ||
495 | bpf_append_blk_failure: | |
496 | prg->blk_cnt = 0; | |
497 | free(prg->blks); | |
498 | return rc; | |
499 | } | |
500 | ||
501 | /** | |
502 | * Free the BPF program | |
503 | * @param prg the BPF program | |
504 | * | |
505 | * Free the BPF program. None of the associated BPF state used to generate the | |
506 | * BPF program is released in this function. | |
507 | * | |
508 | */ | |
509 | static void _program_free(struct bpf_program *prg) | |
510 | { | |
511 | if (prg == NULL) | |
512 | return; | |
513 | ||
514 | if (prg->blks != NULL) | |
515 | free(prg->blks); | |
516 | free(prg); | |
517 | } | |
518 | ||
519 | /** | |
520 | * Free the BPF state | |
521 | * @param state the BPF state | |
522 | * | |
523 | * Free all of the BPF state, including the BPF program if present. | |
524 | * | |
525 | */ | |
526 | static void _state_release(struct bpf_state *state) | |
527 | { | |
528 | unsigned int bkt; | |
529 | struct bpf_hash_bkt *iter; | |
530 | ||
531 | if (state == NULL) | |
532 | return; | |
533 | ||
534 | /* release all of the hash table entries */ | |
535 | for (bkt = 0; bkt < _BPF_HASH_SIZE; bkt++) { | |
536 | while (state->htbl[bkt]) { | |
537 | iter = state->htbl[bkt]; | |
538 | state->htbl[bkt] = iter->next; | |
539 | __blk_free(state, iter->blk); | |
540 | free(iter); | |
541 | } | |
542 | } | |
543 | _program_free(state->bpf); | |
544 | ||
545 | memset(state, 0, sizeof(*state)); | |
546 | } | |
547 | ||
548 | /** | |
549 | * Add an instruction block to the BPF state hash table | |
550 | * @param state the BPF state | |
551 | * @param blk_p pointer to the BPF instruction block | |
552 | * @param found initial found value (see _hsh_find_once() for description) | |
553 | * | |
554 | * This function adds an instruction block to the hash table, and frees the | |
555 | * block if an identical instruction block already exists, returning a pointer | |
556 | * to the original block in place of the given block. Returns zero on success | |
557 | * and negative values on failure. | |
558 | * | |
559 | */ | |
560 | static int _hsh_add(struct bpf_state *state, struct bpf_blk **blk_p, | |
561 | unsigned int found) | |
562 | { | |
563 | uint64_t h_val; | |
564 | struct bpf_hash_bkt *h_new, *h_iter, *h_prev = NULL; | |
565 | struct bpf_blk *blk = *blk_p; | |
566 | struct bpf_blk *b_iter; | |
567 | ||
568 | if (blk->flag_hash) | |
569 | return 0; | |
570 | ||
571 | h_new = malloc(sizeof(*h_new)); | |
572 | if (h_new == NULL) | |
573 | return -ENOMEM; | |
574 | memset(h_new, 0, sizeof(*h_new)); | |
575 | ||
576 | /* generate the hash */ | |
577 | h_val = jhash(blk->blks, _BLK_MSZE(blk), 0); | |
578 | blk->hash = h_val; | |
579 | blk->flag_hash = true; | |
580 | blk->node = NULL; | |
581 | h_new->blk = blk; | |
582 | h_new->found = (found ? 1 : 0); | |
583 | ||
584 | /* insert the block into the hash table */ | |
585 | hsh_add_restart: | |
586 | h_iter = state->htbl[h_val & _BPF_HASH_MASK]; | |
587 | if (h_iter != NULL) { | |
588 | do { | |
589 | if ((h_iter->blk->hash == h_val) && | |
590 | (_BLK_MSZE(h_iter->blk) == _BLK_MSZE(blk)) && | |
591 | (memcmp(h_iter->blk->blks, blk->blks, | |
592 | _BLK_MSZE(blk)) == 0)) { | |
593 | /* duplicate block */ | |
594 | free(h_new); | |
595 | ||
596 | /* store the duplicate block */ | |
597 | b_iter = h_iter->blk; | |
598 | while (b_iter->hash_nxt != NULL) | |
599 | b_iter = b_iter->hash_nxt; | |
600 | b_iter->hash_nxt = blk; | |
601 | ||
602 | /* in some cases we want to return the | |
603 | * duplicate block */ | |
604 | if (found) { | |
605 | blk->flag_dup = true; | |
606 | return 0; | |
607 | } | |
608 | ||
609 | /* update the priority if needed */ | |
610 | if (h_iter->blk->priority < blk->priority) | |
611 | h_iter->blk->priority = blk->priority; | |
612 | ||
613 | /* try to save some memory */ | |
614 | free(blk->blks); | |
615 | blk->blks = h_iter->blk->blks; | |
616 | blk->flag_unique = false; | |
617 | ||
618 | *blk_p = h_iter->blk; | |
619 | return 0; | |
620 | } else if (h_iter->blk->hash == h_val) { | |
621 | /* hash collision */ | |
622 | if ((h_val >> 32) == 0xffffffff) { | |
623 | /* overflow */ | |
624 | blk->flag_hash = false; | |
625 | blk->hash = 0; | |
626 | free(h_new); | |
627 | return -EFAULT; | |
628 | } | |
629 | h_val += ((uint64_t)1 << 32); | |
630 | h_new->blk->hash = h_val; | |
631 | ||
632 | /* restart at the beginning of the bucket */ | |
633 | goto hsh_add_restart; | |
634 | } else { | |
635 | /* no match, move along */ | |
636 | h_prev = h_iter; | |
637 | h_iter = h_iter->next; | |
638 | } | |
639 | } while (h_iter != NULL); | |
640 | h_prev->next = h_new; | |
641 | } else | |
642 | state->htbl[h_val & _BPF_HASH_MASK] = h_new; | |
643 | ||
644 | return 0; | |
645 | } | |
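/*
 * NOTE: _hsh_add() keys blocks by a 32-bit jhash of the raw instructions kept
 * in the low half of a 64-bit value; identical blocks are chained via
 * ->hash_nxt and, where possible, share a single copy of the instruction
 * array, while true collisions are resolved by bumping the upper 32 bits of
 * the hash and re-probing the bucket.  The typical call pattern used later in
 * this file:
 *
 *	rc = _hsh_add(state, &blk, 0);
 *	if (rc < 0)
 *		return NULL;	// on success blk may now point at an older copy
 */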
646 | ||
647 | /** | |
648 | * Remove an entry from the hash table | |
649 | * @param state the BPF state | |
650 | * @param h_val the hash value | |
651 | * | |
652 | * Remove an entry from the hash table and return it to the caller; NULL is | |
653 | * returned if the entry can not be found. | |
654 | * | |
655 | */ | |
656 | static struct bpf_blk *_hsh_remove(struct bpf_state *state, uint64_t h_val) | |
657 | { | |
658 | unsigned int bkt = h_val & _BPF_HASH_MASK; | |
659 | struct bpf_blk *blk; | |
660 | struct bpf_hash_bkt *h_iter, *h_prev = NULL; | |
661 | ||
662 | h_iter = state->htbl[bkt]; | |
663 | while (h_iter != NULL) { | |
664 | if (h_iter->blk->hash == h_val) { | |
665 | if (h_prev != NULL) | |
666 | h_prev->next = h_iter->next; | |
667 | else | |
668 | state->htbl[bkt] = h_iter->next; | |
669 | blk = h_iter->blk; | |
670 | free(h_iter); | |
671 | return blk; | |
672 | } | |
673 | h_prev = h_iter; | |
674 | h_iter = h_iter->next; | |
675 | } | |
676 | ||
677 | return NULL; | |
678 | } | |
679 | ||
680 | /** | |
681 | * Find and return a hash bucket | |
682 | * @param state the BPF state | |
683 | * @param h_val the hash value | |
684 | * | |
685 | * Find the entry associated with the given hash value and return it to the | |
686 | * caller; NULL is returned if the entry can not be found. This function | |
687 | * should not be called directly; use _hsh_find() and _hsh_find_once() instead. | |
688 | * | |
689 | */ | |
690 | static struct bpf_hash_bkt *_hsh_find_bkt(const struct bpf_state *state, | |
691 | uint64_t h_val) | |
692 | { | |
693 | struct bpf_hash_bkt *h_iter; | |
694 | ||
695 | h_iter = state->htbl[h_val & _BPF_HASH_MASK]; | |
696 | while (h_iter != NULL) { | |
697 | if (h_iter->blk->hash == h_val) | |
698 | return h_iter; | |
699 | h_iter = h_iter->next; | |
700 | } | |
701 | ||
702 | return NULL; | |
703 | } | |
704 | ||
705 | /** | |
706 | * Find an entry in the hash table and return it only once | |
707 | * @param state the BPF state | |
708 | * @param h_val the hash value | |
709 | * | |
710 | * Find the entry associated with the given hash value and return it to the | |
711 | * caller if it has not been returned previously by this function; returns NULL | |
712 | * if the entry can not be found or has already been returned in a previous | |
713 | * call. | |
714 | * | |
715 | */ | |
716 | static struct bpf_blk *_hsh_find_once(const struct bpf_state *state, | |
717 | uint64_t h_val) | |
718 | { | |
719 | struct bpf_hash_bkt *h_iter; | |
720 | ||
721 | h_iter = _hsh_find_bkt(state, h_val); | |
722 | if (h_iter == NULL || h_iter->found != 0) | |
723 | return NULL; | |
724 | h_iter->found = 1; | |
725 | return h_iter->blk; | |
726 | } | |
727 | ||
728 | /** | |
729 | * Find an entry in the hash table | |
730 | * @param state the BPF state | |
731 | * @param h_val the hash value | |
732 | * | |
733 | * Find the entry associated with the given hash value and return it to the | |
734 | * caller; NULL is returned if the entry can not be found. | |
735 | * | |
736 | */ | |
737 | static struct bpf_blk *_hsh_find(const struct bpf_state *state, uint64_t h_val) | |
738 | { | |
739 | struct bpf_hash_bkt *h_iter; | |
740 | ||
741 | h_iter = _hsh_find_bkt(state, h_val); | |
742 | if (h_iter == NULL) | |
743 | return NULL; | |
744 | return h_iter->blk; | |
745 | } | |
746 | ||
747 | /** | |
748 | * Generate a BPF action instruction | |
749 | * @param state the BPF state | |
750 | * @param blk the BPF instruction block, or NULL | |
751 | * @param action the desired action | |
752 | * | |
753 | * Generate a BPF action instruction and append it to the given instruction | |
754 | * block. Returns a pointer to the instruction block on success, NULL on | |
755 | * failure. | |
756 | * | |
757 | */ | |
758 | static struct bpf_blk *_gen_bpf_action(struct bpf_state *state, | |
759 | struct bpf_blk *blk, uint32_t action) | |
760 | { | |
761 | struct bpf_instr instr; | |
762 | ||
763 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_RET), | |
764 | _BPF_JMP_NO, _BPF_JMP_NO, _BPF_K(state->arch, action)); | |
765 | return _blk_append(state, blk, &instr); | |
766 | } | |
767 | ||
768 | /** | |
769 | * Generate a BPF action instruction and insert it into the hash table | |
770 | * @param state the BPF state | |
771 | * @param action the desired action | |
772 | * | |
773 | * Generate a BPF action instruction and insert it into the hash table. | |
774 | * Returns a pointer to the instruction block on success, NULL on failure. | |
775 | * | |
776 | */ | |
777 | static struct bpf_blk *_gen_bpf_action_hsh(struct bpf_state *state, | |
778 | uint32_t action) | |
779 | { | |
780 | struct bpf_blk *blk; | |
781 | ||
782 | blk = _gen_bpf_action(state, NULL, action); | |
783 | if (blk == NULL) | |
784 | return NULL; | |
785 | if (_hsh_add(state, &blk, 0) < 0) { | |
786 | _blk_free(state, blk); | |
787 | return NULL; | |
788 | } | |
789 | ||
790 | return blk; | |
791 | } | |
792 | ||
793 | /** | |
794 | * Generate a BPF instruction block for a given chain node | |
795 | * @param state the BPF state | |
796 | * @param node the filter chain node | |
797 | * @param a_state the accumulator state | |
798 | * | |
799 | * Generate BPF instructions to execute the filter for the given chain node. | |
800 | * Returns a pointer to the instruction block on success, NULL on failure. | |
801 | * | |
802 | */ | |
803 | static struct bpf_blk *_gen_bpf_node(struct bpf_state *state, | |
804 | const struct db_arg_chain_tree *node, | |
805 | struct acc_state *a_state) | |
806 | { | |
807 | int32_t acc_offset; | |
808 | uint32_t acc_mask; | |
809 | uint64_t act_t_hash = 0, act_f_hash = 0; | |
810 | struct bpf_blk *blk, *b_act; | |
811 | struct bpf_instr instr; | |
812 | ||
813 | blk = _blk_alloc(); | |
814 | if (blk == NULL) | |
815 | return NULL; | |
816 | blk->acc_start = *a_state; | |
817 | ||
818 | /* generate the action blocks */ | |
819 | if (node->act_t_flg) { | |
820 | b_act = _gen_bpf_action_hsh(state, node->act_t); | |
821 | if (b_act == NULL) | |
822 | goto node_failure; | |
823 | act_t_hash = b_act->hash; | |
824 | } | |
825 | if (node->act_f_flg) { | |
826 | b_act = _gen_bpf_action_hsh(state, node->act_f); | |
827 | if (b_act == NULL) | |
828 | goto node_failure; | |
829 | act_f_hash = b_act->hash; | |
830 | } | |
831 | ||
832 | /* check the accumulator state */ | |
833 | acc_offset = node->arg_offset; | |
834 | acc_mask = node->mask; | |
835 | if (acc_offset < 0) | |
836 | goto node_failure; | |
837 | if ((acc_offset != a_state->offset) || | |
838 | ((acc_mask & a_state->mask) != acc_mask)) { | |
839 | /* reload the accumulator */ | |
840 | a_state->offset = acc_offset; | |
841 | a_state->mask = ARG_MASK_MAX; | |
842 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_LD + BPF_ABS), | |
843 | _BPF_JMP_NO, _BPF_JMP_NO, | |
844 | _BPF_K(state->arch, acc_offset)); | |
845 | blk = _blk_append(state, blk, &instr); | |
846 | if (blk == NULL) | |
847 | goto node_failure; | |
848 | /* we're not dependent on the accumulator anymore */ | |
849 | blk->acc_start = _ACC_STATE_UNDEF; | |
850 | } | |
851 | if (acc_mask != a_state->mask) { | |
852 | /* apply the bitmask */ | |
853 | a_state->mask = acc_mask; | |
854 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_ALU + BPF_AND), | |
855 | _BPF_JMP_NO, _BPF_JMP_NO, | |
856 | _BPF_K(state->arch, acc_mask)); | |
857 | blk = _blk_append(state, blk, &instr); | |
858 | if (blk == NULL) | |
859 | goto node_failure; | |
860 | } | |
861 | ||
862 | /* check the accumulator against the datum */ | |
863 | switch (node->op) { | |
864 | case SCMP_CMP_MASKED_EQ: | |
865 | case SCMP_CMP_EQ: | |
866 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JEQ), | |
867 | _BPF_JMP_NO, _BPF_JMP_NO, | |
868 | _BPF_K(state->arch, node->datum)); | |
869 | break; | |
870 | case SCMP_CMP_GT: | |
871 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JGT), | |
872 | _BPF_JMP_NO, _BPF_JMP_NO, | |
873 | _BPF_K(state->arch, node->datum)); | |
874 | break; | |
875 | case SCMP_CMP_GE: | |
876 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JGE), | |
877 | _BPF_JMP_NO, _BPF_JMP_NO, | |
878 | _BPF_K(state->arch, node->datum)); | |
879 | break; | |
880 | case SCMP_CMP_NE: | |
881 | case SCMP_CMP_LT: | |
882 | case SCMP_CMP_LE: | |
883 | default: | |
884 | /* fatal error, we should never get here */ | |
885 | goto node_failure; | |
886 | } | |
887 | ||
888 | /* fixup the jump targets */ | |
889 | if (node->nxt_t != NULL) | |
890 | instr.jt = _BPF_JMP_DB(node->nxt_t); | |
891 | else if (node->act_t_flg) | |
892 | instr.jt = _BPF_JMP_HSH(act_t_hash); | |
893 | else | |
894 | instr.jt = _BPF_JMP_NXT(0); | |
895 | if (node->nxt_f != NULL) | |
896 | instr.jf = _BPF_JMP_DB(node->nxt_f); | |
897 | else if (node->act_f_flg) | |
898 | instr.jf = _BPF_JMP_HSH(act_f_hash); | |
899 | else | |
900 | instr.jf = _BPF_JMP_NXT(0); | |
901 | blk = _blk_append(state, blk, &instr); | |
902 | if (blk == NULL) | |
903 | goto node_failure; | |
904 | ||
905 | blk->node = node; | |
906 | blk->acc_end = *a_state; | |
907 | return blk; | |
908 | ||
909 | node_failure: | |
910 | _blk_free(state, blk); | |
911 | return NULL; | |
912 | } | |
913 | ||
914 | /** | |
915 | * Resolve the jump targets in a BPF instruction block | |
916 | * @param state the BPF state | |
917 | * @param sys the syscall filter | |
918 | * @param blk the BPF instruction block | |
919 | * @param nxt_jump the jump to fallthrough to at the end of the level | |
920 | * | |
921 | * Resolve the jump targets in a BPF instruction block generated by the | |
922 | * _gen_bpf_chain_lvl() function and add the resulting block to the hash | |
923 | * table. Returns a pointer to the new instruction block on success, NULL on | |
924 | * failure. | |
925 | * | |
926 | */ | |
927 | static struct bpf_blk *_gen_bpf_chain_lvl_res(struct bpf_state *state, | |
928 | const struct db_sys_list *sys, | |
929 | struct bpf_blk *blk, | |
930 | const struct bpf_jump *nxt_jump) | |
931 | { | |
932 | int rc; | |
933 | unsigned int iter; | |
934 | struct bpf_blk *b_new; | |
935 | struct bpf_instr *i_iter; | |
936 | struct db_arg_chain_tree *node; | |
937 | ||
938 | if (blk->flag_hash) | |
939 | return blk; | |
940 | ||
941 | /* convert TGT_PTR_DB to TGT_PTR_HSH references */ | |
942 | for (iter = 0; iter < blk->blk_cnt; iter++) { | |
943 | i_iter = &blk->blks[iter]; | |
944 | switch (i_iter->jt.type) { | |
945 | case TGT_NONE: | |
946 | case TGT_IMM: | |
947 | case TGT_PTR_HSH: | |
948 | /* ignore these jump types */ | |
949 | break; | |
950 | case TGT_PTR_BLK: | |
951 | b_new = _gen_bpf_chain_lvl_res(state, sys, | |
952 | i_iter->jt.tgt.blk, | |
953 | nxt_jump); | |
954 | if (b_new == NULL) | |
955 | return NULL; | |
956 | i_iter->jt = _BPF_JMP_HSH(b_new->hash); | |
957 | break; | |
958 | case TGT_PTR_DB: | |
959 | node = (struct db_arg_chain_tree *)i_iter->jt.tgt.db; | |
960 | b_new = _gen_bpf_chain(state, sys, node, | |
961 | nxt_jump, &blk->acc_start); | |
962 | if (b_new == NULL) | |
963 | return NULL; | |
964 | i_iter->jt = _BPF_JMP_HSH(b_new->hash); | |
965 | break; | |
966 | default: | |
967 | /* we should not be here */ | |
968 | return NULL; | |
969 | } | |
970 | switch (i_iter->jf.type) { | |
971 | case TGT_NONE: | |
972 | case TGT_IMM: | |
973 | case TGT_PTR_HSH: | |
974 | /* ignore these jump types */ | |
975 | break; | |
976 | case TGT_PTR_BLK: | |
977 | b_new = _gen_bpf_chain_lvl_res(state, sys, | |
978 | i_iter->jf.tgt.blk, | |
979 | nxt_jump); | |
980 | if (b_new == NULL) | |
981 | return NULL; | |
982 | i_iter->jf = _BPF_JMP_HSH(b_new->hash); | |
983 | break; | |
984 | case TGT_PTR_DB: | |
985 | node = (struct db_arg_chain_tree *)i_iter->jf.tgt.db; | |
986 | b_new = _gen_bpf_chain(state, sys, node, | |
987 | nxt_jump, &blk->acc_start); | |
988 | if (b_new == NULL) | |
989 | return NULL; | |
990 | i_iter->jf = _BPF_JMP_HSH(b_new->hash); | |
991 | break; | |
992 | default: | |
993 | /* we should not be here */ | |
994 | return NULL; | |
995 | } | |
996 | switch (i_iter->k.type) { | |
997 | case TGT_NONE: | |
998 | case TGT_K: | |
999 | case TGT_PTR_HSH: | |
1000 | /* ignore these jump types */ | |
1001 | break; | |
1002 | default: | |
1003 | /* we should not be here */ | |
1004 | return NULL; | |
1005 | } | |
1006 | } | |
1007 | ||
1008 | /* insert the block into the hash table */ | |
1009 | rc = _hsh_add(state, &blk, 0); | |
1010 | if (rc < 0) | |
1011 | return NULL; | |
1012 | ||
1013 | return blk; | |
1014 | } | |
1015 | ||
1016 | /** | |
1017 | * Generate the BPF instruction blocks for a given filter chain | |
1018 | * @param state the BPF state | |
1019 | * @param sys the syscall filter | |
1020 | * @param chain the filter chain | |
1021 | * @param nxt_jump the jump to fallthrough to at the end of the level | |
1022 | * @param a_state the accumulator state | |
1023 | * | |
1024 | * Generate the BPF instruction blocks for the given filter chain and return | |
1025 | * a pointer to the first block on success; returns NULL on failure. | |
1026 | * | |
1027 | */ | |
1028 | static struct bpf_blk *_gen_bpf_chain(struct bpf_state *state, | |
1029 | const struct db_sys_list *sys, | |
1030 | const struct db_arg_chain_tree *chain, | |
1031 | const struct bpf_jump *nxt_jump, | |
1032 | struct acc_state *a_state) | |
1033 | { | |
1034 | struct bpf_blk *b_head = NULL, *b_tail = NULL; | |
1035 | struct bpf_blk *b_prev, *b_next, *b_iter; | |
1036 | struct bpf_instr *i_iter; | |
1037 | const struct db_arg_chain_tree *c_iter; | |
1038 | unsigned int iter; | |
1039 | struct bpf_jump nxt_jump_tmp; | |
1040 | struct acc_state acc = *a_state; | |
1041 | ||
1042 | if (chain == NULL) { | |
1043 | b_head = _gen_bpf_action(state, NULL, sys->action); | |
1044 | if (b_head == NULL) | |
1045 | goto chain_failure; | |
1046 | b_tail = b_head; | |
1047 | } else { | |
1048 | /* find the starting node of the level */ | |
1049 | c_iter = chain; | |
1050 | while (c_iter->lvl_prv != NULL) | |
1051 | c_iter = c_iter->lvl_prv; | |
1052 | ||
1053 | /* build all of the blocks for this level */ | |
1054 | do { | |
1055 | b_iter = _gen_bpf_node(state, c_iter, &acc); | |
1056 | if (b_iter == NULL) | |
1057 | goto chain_failure; | |
1058 | if (b_head != NULL) { | |
1059 | b_iter->lvl_prv = b_tail; | |
1060 | b_tail->lvl_nxt = b_iter; | |
1061 | b_tail = b_iter; | |
1062 | } else { | |
1063 | b_head = b_iter; | |
1064 | b_tail = b_iter; | |
1065 | } | |
1066 | c_iter = c_iter->lvl_nxt; | |
1067 | } while (c_iter != NULL); | |
1068 | ||
1069 | /* resolve the TGT_NXT jumps */ | |
1070 | b_iter = b_head; | |
1071 | do { | |
1072 | b_next = b_iter->lvl_nxt; | |
1073 | for (iter = 0; iter < b_iter->blk_cnt; iter++) { | |
1074 | i_iter = &b_iter->blks[iter]; | |
1075 | if (i_iter->jt.type == TGT_NXT) { | |
1076 | if (i_iter->jt.tgt.nxt != 0) | |
1077 | goto chain_failure; | |
1078 | if (b_next == NULL) | |
1079 | i_iter->jt = *nxt_jump; | |
1080 | else | |
1081 | i_iter->jt = | |
1082 | _BPF_JMP_BLK(b_next); | |
1083 | } | |
1084 | if (i_iter->jf.type == TGT_NXT) { | |
1085 | if (i_iter->jf.tgt.nxt != 0) | |
1086 | goto chain_failure; | |
1087 | if (b_next == NULL) | |
1088 | i_iter->jf = *nxt_jump; | |
1089 | else | |
1090 | i_iter->jf = | |
1091 | _BPF_JMP_BLK(b_next); | |
1092 | } | |
1093 | } | |
1094 | b_iter = b_next; | |
1095 | } while (b_iter != NULL); | |
1096 | } | |
1097 | ||
1098 | /* resolve all of the blocks */ | |
1099 | memset(&nxt_jump_tmp, 0, sizeof(nxt_jump_tmp)); | |
1100 | b_iter = b_tail; | |
1101 | do { | |
1102 | /* b_iter may change after resolving, so save the linkage */ | |
1103 | b_prev = b_iter->lvl_prv; | |
1104 | b_next = b_iter->lvl_nxt; | |
1105 | ||
1106 | nxt_jump_tmp = _BPF_JMP_BLK(b_next); | |
1107 | b_iter = _gen_bpf_chain_lvl_res(state, sys, b_iter, | |
1108 | (b_next == NULL ? | |
1109 | nxt_jump : | |
1110 | &nxt_jump_tmp)); | |
1111 | if (b_iter == NULL) | |
1112 | goto chain_failure; | |
1113 | ||
1114 | /* restore the block linkage on this level */ | |
1115 | if (b_prev != NULL) | |
1116 | b_prev->lvl_nxt = b_iter; | |
1117 | b_iter->lvl_prv = b_prev; | |
1118 | b_iter->lvl_nxt = b_next; | |
1119 | if (b_next != NULL) | |
1120 | b_next->lvl_prv = b_iter; | |
1121 | if (b_iter->lvl_prv == NULL) | |
1122 | b_head = b_iter; | |
1123 | ||
1124 | b_iter = b_prev; | |
1125 | } while (b_iter != NULL); | |
1126 | ||
1127 | return b_head; | |
1128 | ||
1129 | chain_failure: | |
1130 | while (b_head != NULL) { | |
1131 | b_iter = b_head; | |
1132 | b_head = b_iter->lvl_nxt; | |
1133 | _blk_free(state, b_iter); | |
1134 | } | |
1135 | return NULL; | |
1136 | } | |
1137 | ||
1138 | /** | |
1139 | * Generate the BPF instruction blocks for a given syscall | |
1140 | * @param state the BPF state | |
1141 | * @param sys the syscall filter DB entry | |
1142 | * @param nxt_hash the hash value of the next syscall filter DB entry | |
1143 | * @param acc_reset accumulator reset flag | |
1144 | * | |
1145 | * Generate the BPF instruction blocks for the given syscall filter and return | |
1146 | * a pointer to the first block on success; returns NULL on failure. It is | |
1147 | * important to note that the block returned has not been added to the hash | |
1148 | * table, however, any linked/referenced blocks have been added to the hash | |
1149 | * table. | |
1150 | * | |
1151 | */ | |
1152 | static struct bpf_blk *_gen_bpf_syscall(struct bpf_state *state, | |
1153 | const struct db_sys_list *sys, | |
1154 | uint64_t nxt_hash, | |
1155 | bool acc_reset) | |
1156 | { | |
1157 | int rc; | |
1158 | struct bpf_instr instr; | |
1159 | struct bpf_blk *blk_c, *blk_s; | |
1160 | struct bpf_jump def_jump; | |
1161 | struct acc_state a_state; | |
1162 | ||
1163 | /* we do the memset before the assignment to keep valgrind happy */ | |
1164 | memset(&def_jump, 0, sizeof(def_jump)); | |
1165 | def_jump = _BPF_JMP_HSH(state->def_hsh); | |
1166 | ||
1167 | blk_s = _blk_alloc(); | |
1168 | if (blk_s == NULL) | |
1169 | return NULL; | |
1170 | ||
1171 | /* setup the accumulator state */ | |
1172 | if (acc_reset) { | |
1173 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_LD + BPF_ABS), | |
1174 | _BPF_JMP_NO, _BPF_JMP_NO, | |
1175 | _BPF_SYSCALL(state->arch)); | |
1176 | blk_s = _blk_append(state, blk_s, &instr); | |
1177 | if (blk_s == NULL) | |
1178 | return NULL; | |
1179 | /* we've loaded the syscall ourselves */ | |
1180 | a_state = _ACC_STATE_OFFSET(_BPF_OFFSET_SYSCALL); | |
1181 | blk_s->acc_start = _ACC_STATE_UNDEF; | |
1182 | blk_s->acc_end = _ACC_STATE_OFFSET(_BPF_OFFSET_SYSCALL); | |
1183 | } else { | |
1184 | /* we rely on someone else to load the syscall */ | |
1185 | a_state = _ACC_STATE_UNDEF; | |
1186 | blk_s->acc_start = _ACC_STATE_OFFSET(_BPF_OFFSET_SYSCALL); | |
1187 | blk_s->acc_end = _ACC_STATE_OFFSET(_BPF_OFFSET_SYSCALL); | |
1188 | } | |
1189 | ||
1190 | /* generate the argument chains */ | |
1191 | blk_c = _gen_bpf_chain(state, sys, sys->chains, &def_jump, &a_state); | |
1192 | if (blk_c == NULL) { | |
1193 | _blk_free(state, blk_s); | |
1194 | return NULL; | |
1195 | } | |
1196 | ||
1197 | /* syscall check */ | |
1198 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JEQ), | |
1199 | _BPF_JMP_HSH(blk_c->hash), _BPF_JMP_HSH(nxt_hash), | |
1200 | _BPF_K(state->arch, sys->num)); | |
1201 | blk_s = _blk_append(state, blk_s, &instr); | |
1202 | if (blk_s == NULL) | |
1203 | return NULL; | |
1204 | blk_s->priority = sys->priority; | |
1205 | ||
1206 | /* add to the hash table */ | |
1207 | rc = _hsh_add(state, &blk_s, 1); | |
1208 | if (rc < 0) { | |
1209 | _blk_free(state, blk_s); | |
1210 | return NULL; | |
1211 | } | |
1212 | ||
1213 | return blk_s; | |
1214 | } | |
1215 | ||
1216 | /** | |
1217 | * Generate the BPF instruction blocks for a given filter/architecture | |
1218 | * @param state the BPF state | |
1219 | * @param db the filter DB | |
1220 | * @param db_secondary the secondary DB | |
1221 | * | |
1222 | * Generate the BPF instruction block for the given filter DB(s)/architecture(s) | |
1223 | * and return a pointer to the block on success, NULL on failure. The resulting | |
1224 | * block assumes that the architecture token has already been loaded into the | |
1225 | * BPF accumulator. | |
1226 | * | |
1227 | */ | |
1228 | static struct bpf_blk *_gen_bpf_arch(struct bpf_state *state, | |
1229 | const struct db_filter *db, | |
1230 | const struct db_filter *db_secondary) | |
1231 | { | |
1232 | int rc; | |
1233 | unsigned int blk_cnt = 0; | |
1234 | bool acc_reset; | |
1235 | struct bpf_instr instr; | |
1236 | struct db_sys_list *s_head = NULL, *s_tail = NULL, *s_iter, *s_iter_b; | |
1237 | struct bpf_blk *b_head = NULL, *b_tail = NULL, *b_iter, *b_new; | |
1238 | ||
1239 | state->arch = db->arch; | |
1240 | ||
1241 | /* sort the syscall list */ | |
1242 | db_list_foreach(s_iter, db->syscalls) { | |
1243 | if (s_head != NULL) { | |
1244 | s_iter_b = s_head; | |
1245 | while ((s_iter_b->pri_nxt != NULL) && | |
1246 | (s_iter->priority <= s_iter_b->priority)) | |
1247 | s_iter_b = s_iter_b->pri_nxt; | |
1248 | ||
1249 | if (s_iter->priority > s_iter_b->priority) { | |
1250 | s_iter->pri_prv = s_iter_b->pri_prv; | |
1251 | s_iter->pri_nxt = s_iter_b; | |
1252 | if (s_iter_b == s_head) { | |
1253 | s_head->pri_prv = s_iter; | |
1254 | s_head = s_iter; | |
1255 | } else { | |
1256 | s_iter->pri_prv->pri_nxt = s_iter; | |
1257 | s_iter->pri_nxt->pri_prv = s_iter; | |
1258 | } | |
1259 | } else { | |
1260 | s_iter->pri_prv = s_tail; | |
1261 | s_iter->pri_nxt = NULL; | |
1262 | s_iter->pri_prv->pri_nxt = s_iter; | |
1263 | s_tail = s_iter; | |
1264 | } | |
1265 | } else { | |
1266 | s_head = s_iter; | |
1267 | s_tail = s_iter; | |
1268 | s_head->pri_prv = NULL; | |
1269 | s_head->pri_nxt = NULL; | |
1270 | } | |
1271 | } | |
1272 | if (db_secondary != NULL) { | |
1273 | db_list_foreach(s_iter, db_secondary->syscalls) { | |
1274 | if (s_head != NULL) { | |
1275 | s_iter_b = s_head; | |
1276 | while ((s_iter_b->pri_nxt != NULL) && | |
1277 | (s_iter->priority <= s_iter_b->priority)) | |
1278 | s_iter_b = s_iter_b->pri_nxt; | |
1279 | ||
1280 | if (s_iter->priority > s_iter_b->priority) { | |
1281 | s_iter->pri_prv = s_iter_b->pri_prv; | |
1282 | s_iter->pri_nxt = s_iter_b; | |
1283 | if (s_iter_b == s_head) { | |
1284 | s_head->pri_prv = s_iter; | |
1285 | s_head = s_iter; | |
1286 | } else { | |
1287 | s_iter->pri_prv->pri_nxt = | |
1288 | s_iter; | |
1289 | s_iter->pri_nxt->pri_prv = | |
1290 | s_iter; | |
1291 | } | |
1292 | } else { | |
1293 | s_iter->pri_prv = s_tail; | |
1294 | s_iter->pri_nxt = NULL; | |
1295 | s_iter->pri_prv->pri_nxt = s_iter; | |
1296 | s_tail = s_iter; | |
1297 | } | |
1298 | } else { | |
1299 | s_head = s_iter; | |
1300 | s_tail = s_iter; | |
1301 | s_head->pri_prv = NULL; | |
1302 | s_head->pri_nxt = NULL; | |
1303 | } | |
1304 | } | |
1305 | } | |
1306 | ||
1307 | if ((state->arch->token == SCMP_ARCH_X86_64 || | |
1308 | state->arch->token == SCMP_ARCH_X32) && (db_secondary == NULL)) | |
1309 | acc_reset = false; | |
1310 | else | |
1311 | acc_reset = true; | |
1312 | ||
1313 | /* create the syscall filters and add them to block list group */ | |
1314 | for (s_iter = s_tail; s_iter != NULL; s_iter = s_iter->pri_prv) { | |
1315 | if (!s_iter->valid) | |
1316 | continue; | |
1317 | ||
1318 | /* build the syscall filter */ | |
1319 | b_new = _gen_bpf_syscall(state, s_iter, | |
1320 | (b_head == NULL ? | |
1321 | state->def_hsh : b_head->hash), | |
1322 | (s_iter == s_head ? | |
1323 | acc_reset : false)); | |
1324 | if (b_new == NULL) | |
1325 | goto arch_failure; | |
1326 | ||
1327 | /* add the filter to the list head */ | |
1328 | b_new->prev = NULL; | |
1329 | b_new->next = b_head; | |
1330 | if (b_tail != NULL) { | |
1331 | b_head->prev = b_new; | |
1332 | b_head = b_new; | |
1333 | } else { | |
1334 | b_head = b_new; | |
1335 | b_tail = b_head; | |
1336 | } | |
1337 | ||
1338 | if (b_tail->next != NULL) | |
1339 | b_tail = b_tail->next; | |
1340 | blk_cnt++; | |
1341 | } | |
1342 | ||
1343 | /* additional ABI filtering */ | |
1344 | if ((state->arch->token == SCMP_ARCH_X86_64 || | |
1345 | state->arch->token == SCMP_ARCH_X32) && (db_secondary == NULL)) { | |
1346 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_LD + BPF_ABS), | |
1347 | _BPF_JMP_NO, _BPF_JMP_NO, _BPF_SYSCALL(state->arch)); | |
1348 | b_new = _blk_append(state, NULL, &instr); | |
1349 | if (b_new == NULL) | |
1350 | goto arch_failure; | |
1351 | b_new->acc_end = _ACC_STATE_OFFSET(_BPF_OFFSET_SYSCALL); | |
1352 | if (state->arch->token == SCMP_ARCH_X86_64) { | |
1353 | /* filter out x32 */ | |
1354 | _BPF_INSTR(instr, | |
1355 | _BPF_OP(state->arch, BPF_JMP + BPF_JGE), | |
1356 | _BPF_JMP_HSH(state->bad_arch_hsh), | |
1357 | _BPF_JMP_NO, | |
1358 | _BPF_K(state->arch, X32_SYSCALL_BIT)); | |
1359 | if (b_head != NULL) | |
1360 | instr.jf = _BPF_JMP_HSH(b_head->hash); | |
1361 | else | |
1362 | instr.jf = _BPF_JMP_HSH(state->def_hsh); | |
1363 | blk_cnt++; | |
1364 | } else if (state->arch->token == SCMP_ARCH_X32) { | |
1365 | /* filter out x86_64 */ | |
1366 | _BPF_INSTR(instr, | |
1367 | _BPF_OP(state->arch, BPF_JMP + BPF_JGE), | |
1368 | _BPF_JMP_NO, | |
1369 | _BPF_JMP_HSH(state->bad_arch_hsh), | |
1370 | _BPF_K(state->arch, X32_SYSCALL_BIT)); | |
1371 | if (b_head != NULL) | |
1372 | instr.jt = _BPF_JMP_HSH(b_head->hash); | |
1373 | else | |
1374 | instr.jt = _BPF_JMP_HSH(state->def_hsh); | |
1375 | blk_cnt++; | |
1376 | } else | |
1377 | /* we should never get here */ | |
1378 | goto arch_failure; | |
1379 | b_new = _blk_append(state, b_new, &instr); | |
1380 | if (b_new == NULL) | |
1381 | goto arch_failure; | |
1382 | b_new->next = b_head; | |
1383 | if (b_head != NULL) | |
1384 | b_head->prev = b_new; | |
1385 | b_head = b_new; | |
1386 | rc = _hsh_add(state, &b_head, 1); | |
1387 | if (rc < 0) | |
1388 | goto arch_failure; | |
1389 | } | |
1390 | ||
1391 | /* do the ABI/architecture check */ | |
1392 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JEQ), | |
1393 | _BPF_JMP_NO, _BPF_JMP_NXT(blk_cnt++), | |
1394 | _BPF_K(state->arch, state->arch->token_bpf)); | |
1395 | if (b_head != NULL) | |
1396 | instr.jt = _BPF_JMP_HSH(b_head->hash); | |
1397 | else | |
1398 | instr.jt = _BPF_JMP_HSH(state->def_hsh); | |
1399 | b_new = _blk_append(state, NULL, &instr); | |
1400 | if (b_new == NULL) | |
1401 | goto arch_failure; | |
1402 | b_new->next = b_head; | |
1403 | if (b_head != NULL) | |
1404 | b_head->prev = b_new; | |
1405 | b_head = b_new; | |
1406 | rc = _hsh_add(state, &b_head, 1); | |
1407 | if (rc < 0) | |
1408 | goto arch_failure; | |
1409 | ||
1410 | state->arch = NULL; | |
1411 | return b_head; | |
1412 | ||
1413 | arch_failure: | |
1414 | /* NOTE: we do the cleanup here rather than just returning an error, as | |
1415 | * not all of the instruction blocks may have been added to the hash | |
1416 | * table by the time we hit an error */ |
1417 | state->arch = NULL; | |
1418 | b_iter = b_head; | |
1419 | while (b_iter != NULL) { | |
1420 | b_new = b_iter->next; | |
1421 | _blk_free(state, b_iter); | |
1422 | b_iter = b_new; | |
1423 | } | |
1424 | return NULL; | |
1425 | } | |
1426 | ||
1427 | /** | |
1428 | * Find the target block for the "next" jump | |
1429 | * @param blk the instruction block | |
1430 | * @param nxt the next offset | |
1431 | * | |
1432 | * Find the target block for the TGT_NXT jump using the given offset. Returns | |
1433 | * a pointer to the target block on success or NULL on failure. | |
1434 | * | |
1435 | */ | |
1436 | static struct bpf_blk *_gen_bpf_find_nxt(const struct bpf_blk *blk, | |
1437 | unsigned int nxt) | |
1438 | { | |
1439 | struct bpf_blk *iter = blk->next; | |
1440 | ||
1441 | for (; (iter != NULL) && (nxt > 0); nxt--) | |
1442 | iter = iter->next; | |
1443 | ||
1444 | return iter; | |
1445 | } | |
1446 | ||
1447 | /** | |
1448 | * Manage jumps to return instructions | |
1449 | * @param state the BPF state | |
1450 | * @param blk the instruction block to check | |
1451 | * @param offset the instruction offset into the instruction block | |
1452 | * @param blk_ret the return instruction block | |
1453 | * | |
1454 | * Using the given block and instruction offset, calculate the jump distance | |
1455 | * between the jumping instruction and return instruction block. If the jump | |
1456 | * distance is too great, duplicate the return instruction to reduce the | |
1457 | * distance to the maximum value. Returns 1 if a long jump was added, zero if | |
1458 | * the existing jump is valid, and negative values on failure. | |
1459 | * | |
1460 | */ | |
1461 | static int _gen_bpf_build_jmp_ret(struct bpf_state *state, | |
1462 | struct bpf_blk *blk, unsigned int offset, | |
1463 | struct bpf_blk *blk_ret) | |
1464 | { | |
1465 | unsigned int j_len; | |
1466 | uint64_t tgt_hash = blk_ret->hash; | |
1467 | struct bpf_blk *b_jmp, *b_new; | |
1468 | ||
1469 | /* calculate the jump distance */ | |
1470 | j_len = blk->blk_cnt - (offset + 1); | |
1471 | b_jmp = blk->next; | |
1472 | while (b_jmp != NULL && b_jmp != blk_ret && j_len < _BPF_JMP_MAX_RET) { | |
1473 | j_len += b_jmp->blk_cnt; | |
1474 | b_jmp = b_jmp->next; | |
1475 | } | |
1476 | if (b_jmp == NULL) | |
1477 | return -EFAULT; | |
1478 | if (j_len <= _BPF_JMP_MAX_RET && b_jmp == blk_ret) | |
1479 | return 0; | |
1480 | ||
1481 | /* we need a closer return instruction, see if one already exists */ | |
1482 | j_len = blk->blk_cnt - (offset + 1); | |
1483 | b_jmp = blk->next; | |
1484 | while (b_jmp != NULL && b_jmp->hash != tgt_hash && | |
1485 | j_len < _BPF_JMP_MAX_RET) { | |
1486 | j_len += b_jmp->blk_cnt; | |
1487 | b_jmp = b_jmp->next; | |
1488 | } | |
1489 | if (b_jmp == NULL) | |
1490 | return -EFAULT; | |
1491 | if (j_len <= _BPF_JMP_MAX_RET && b_jmp->hash == tgt_hash) | |
1492 | return 0; | |
1493 | ||
1494 | /* we need to insert a new return instruction - create one */ | |
1495 | b_new = _gen_bpf_action(state, NULL, blk_ret->blks[0].k.tgt.imm_k); | |
1496 | if (b_new == NULL) | |
1497 | return -EFAULT; | |
1498 | ||
1499 | /* NOTE - we need to be careful here, we're giving the block a hash | |
1500 | * value (this is a sneaky way to ensure we leverage the | |
1501 | * inserted long jumps as much as possible) but we never add the | |
1502 | * block to the hash table so it won't get cleaned up | |
1503 | * automatically */ | |
1504 | b_new->hash = tgt_hash; | |
1505 | ||
1506 | /* insert the jump after the current jumping block */ | |
1507 | b_new->prev = blk; | |
1508 | b_new->next = blk->next; | |
1509 | blk->next->prev = b_new; | |
1510 | blk->next = b_new; | |
1511 | ||
1512 | return 1; | |
1513 | } | |
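/*
 * NOTE: a concrete illustration of the distance check above - if the jumping
 * instruction sits at offset 10 of a 12-instruction block, j_len starts at
 * 12 - (10 + 1) = 1 and grows by blk_cnt for every block in between; once the
 * total would exceed _BPF_JMP_MAX_RET (255), and no equivalent return is
 * already within range, a duplicate return instruction is spliced in
 * immediately after the current block so the 8-bit jt/jf offset can still
 * reach it.
 */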
1514 | ||
1515 | /** | |
1516 | * Manage jump lengths by duplicating and adding jumps if needed | |
1517 | * @param state the BPF state | |
1518 | * @param tail the tail of the instruction block list | |
1519 | * @param blk the instruction block to check | |
1520 | * @param offset the instruction offset into the instruction block | |
1521 | * @param tgt_hash the hash of the jump destination block | |
1522 | * | |
1523 | * Using the given block and instruction offset, calculate the jump distance | |
1524 | * between the jumping instruction and the destination. If the jump distance | |
1525 | * is too great, add a long jump instruction to reduce the distance to a legal | |
1526 | * value. Returns 1 if a new instruction was added, zero if the existing jump | |
1527 | * is valid, and negative values on failure. | |
1528 | * | |
1529 | */ | |
1530 | static int _gen_bpf_build_jmp(struct bpf_state *state, | |
1531 | struct bpf_blk *tail, | |
1532 | struct bpf_blk *blk, unsigned int offset, | |
1533 | uint64_t tgt_hash) | |
1534 | { | |
1535 | int rc; | |
1536 | unsigned int jmp_len; | |
1537 | struct bpf_instr instr; | |
1538 | struct bpf_blk *b_new, *b_jmp, *b_tgt; | |
1539 | ||
1540 | /* find the jump target */ | |
1541 | b_tgt = tail; | |
1542 | while (b_tgt != blk && b_tgt->hash != tgt_hash) | |
1543 | b_tgt = b_tgt->prev; | |
1544 | if (b_tgt == blk) | |
1545 | return -EFAULT; | |
1546 | ||
1547 | if (b_tgt->blk_cnt == 1 && | |
1548 | b_tgt->blks[0].op == _BPF_OP(state->arch, BPF_RET)) { | |
1549 | rc = _gen_bpf_build_jmp_ret(state, blk, offset, b_tgt); | |
1550 | if (rc == 1) | |
1551 | return 1; | |
1552 | else if (rc < 0) | |
1553 | return rc; | |
1554 | } | |
1555 | ||
1556 | /* calculate the jump distance */ | |
1557 | jmp_len = blk->blk_cnt - (offset + 1); | |
1558 | b_jmp = blk->next; | |
1559 | while (b_jmp != NULL && b_jmp != b_tgt && jmp_len < _BPF_JMP_MAX) { | |
1560 | jmp_len += b_jmp->blk_cnt; | |
1561 | b_jmp = b_jmp->next; | |
1562 | } | |
1563 | if (b_jmp == NULL) | |
1564 | return -EFAULT; | |
1565 | if (jmp_len <= _BPF_JMP_MAX && b_jmp == b_tgt) | |
1566 | return 0; | |
1567 | ||
1568 | /* we need a long jump, see if one already exists */ | |
1569 | jmp_len = blk->blk_cnt - (offset + 1); | |
1570 | b_jmp = blk->next; | |
1571 | while (b_jmp != NULL && b_jmp->hash != tgt_hash && | |
1572 | jmp_len < _BPF_JMP_MAX) { | |
1573 | jmp_len += b_jmp->blk_cnt; | |
1574 | b_jmp = b_jmp->next; | |
1575 | } | |
1576 | if (b_jmp == NULL) | |
1577 | return -EFAULT; | |
1578 | if (jmp_len <= _BPF_JMP_MAX && b_jmp->hash == tgt_hash) | |
1579 | return 0; | |
1580 | ||
1581 | /* we need to insert a long jump - create one */ | |
1582 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_JMP + BPF_JA), | |
1583 | _BPF_JMP_NO, _BPF_JMP_NO, _BPF_JMP_HSH(tgt_hash)); | |
1584 | b_new = _blk_append(state, NULL, &instr); | |
1585 | if (b_new == NULL) | |
1586 | return -EFAULT; | |
1587 | ||
1588 | /* NOTE - we need to be careful here, we're giving the block a hash | |
1589 | * value (this is a sneaky way to ensure we leverage the | |
1590 | * inserted long jumps as much as possible) but we never add the | |
1591 | * block to the hash table so it won't get cleaned up | |
1592 | * automatically */ | |
1593 | b_new->hash = tgt_hash; | |
1594 | ||
1595 | /* insert the jump after the current jumping block */ | |
1596 | b_new->prev = blk; | |
1597 | b_new->next = blk->next; | |
1598 | blk->next->prev = b_new; | |
1599 | blk->next = b_new; | |
1600 | ||
1601 | return 1; | |
1602 | } | |
1603 | ||
1604 | /** | |
1605 | * Generate the BPF program for the given filter collection | |
1606 | * @param state the BPF state | |
1607 | * @param col the filter collection | |
1608 | * | |
1609 | * Generate the BPF program for the given filter collection. Returns zero on | |
1610 | * success, negative values on failure. | |
1611 | * | |
1612 | */ | |
1613 | static int _gen_bpf_build_bpf(struct bpf_state *state, | |
1614 | const struct db_filter_col *col) | |
1615 | { | |
1616 | int rc; | |
1617 | int iter; | |
1618 | uint64_t h_val; | |
1619 | unsigned int res_cnt; | |
1620 | unsigned int jmp_len; | |
1621 | int arch_x86_64 = -1, arch_x32 = -1; | |
1622 | struct bpf_instr instr; | |
1623 | struct bpf_instr *i_iter; | |
1624 | struct bpf_blk *b_badarch, *b_default; | |
1625 | struct bpf_blk *b_head = NULL, *b_tail = NULL, *b_iter, *b_new, *b_jmp; | |
1626 | struct db_filter *db_secondary = NULL; | |
1627 | struct arch_def pseudo_arch; | |
1628 | ||
1629 | if (col->filter_cnt == 0) | |
1630 | return -EINVAL; | |
1631 | ||
1632 | /* create a fake architecture definition for use in the early stages */ | |
1633 | memset(&pseudo_arch, 0, sizeof(pseudo_arch)); | |
1634 | pseudo_arch.endian = col->endian; | |
1635 | state->arch = &pseudo_arch; | |
1636 | ||
1637 | /* generate the badarch action */ | |
1638 | b_badarch = _gen_bpf_action(state, NULL, state->attr->act_badarch); | |
1639 | if (b_badarch == NULL) | |
1640 | return -ENOMEM; | |
1641 | rc = _hsh_add(state, &b_badarch, 1); | |
1642 | if (rc < 0) | |
1643 | return rc; | |
1644 | state->bad_arch_hsh = b_badarch->hash; | |
1645 | ||
1646 | /* generate the default action */ | |
1647 | b_default = _gen_bpf_action(state, NULL, state->attr->act_default); | |
1648 | if (b_default == NULL) | |
1649 | return -ENOMEM; | |
1650 | rc = _hsh_add(state, &b_default, 0); | |
1651 | if (rc < 0) | |
1652 | return rc; | |
1653 | state->def_hsh = b_default->hash; | |
1654 | ||
1655 | /* load the architecture token/number */ | |
1656 | _BPF_INSTR(instr, _BPF_OP(state->arch, BPF_LD + BPF_ABS), | |
1657 | _BPF_JMP_NO, _BPF_JMP_NO, _BPF_ARCH(state->arch)); | |
1658 | b_head = _blk_append(state, NULL, &instr); | |
1659 | if (b_head == NULL) | |
1660 | return -ENOMEM; | |
1661 | b_head->acc_end = _ACC_STATE_OFFSET(_BPF_OFFSET_ARCH); | |
1662 | rc = _hsh_add(state, &b_head, 1); | |
1663 | if (rc < 0) | |
1664 | return rc; | |
1665 | b_tail = b_head; | |
1666 | ||
1667 | /* generate the per-architecture filters */ | |
1668 | for (iter = 0; iter < col->filter_cnt; iter++) { | |
1669 | if (col->filters[iter]->arch->token == SCMP_ARCH_X86_64) | |
1670 | arch_x86_64 = iter; | |
1671 | if (col->filters[iter]->arch->token == SCMP_ARCH_X32) | |
1672 | arch_x32 = iter; | |
1673 | } | |
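| /* | |
|  * Editorial note: x86_64 and x32 report the same audit architecture token | |
|  * to seccomp (x32 syscalls are distinguished by a syscall-number bit), so | |
|  * when both filters are present they are emitted as a single combined | |
|  * filter: whichever of the two appears first in the collection drives the | |
|  * generation below and carries the other as its "secondary" filter, while | |
|  * the later one is skipped. | |
|  */ | |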
1674 | for (iter = 0; iter < col->filter_cnt; iter++) { | |
1675 | /* figure out the secondary arch filter mess */ | |
1676 | if (iter == arch_x86_64) { | |
1677 | if (arch_x32 > iter) | |
1678 | db_secondary = col->filters[arch_x32]; | |
1679 | else if (arch_x32 >= 0) | |
1680 | continue; | |
1681 | } else if (iter == arch_x32) { | |
1682 | if (arch_x86_64 > iter) | |
1683 | db_secondary = col->filters[arch_x86_64]; | |
1684 | else if (arch_x86_64 >= 0) | |
1685 | continue; | |
1686 | } else | |
1687 | db_secondary = NULL; | |
1688 | ||
1689 | /* create the filter for the architecture(s) */ | |
1690 | b_new = _gen_bpf_arch(state, col->filters[iter], db_secondary); | |
1691 | if (b_new == NULL) | |
1692 | return -ENOMEM; | |
1693 | b_new->prev = b_tail; | |
1694 | b_tail->next = b_new; | |
1695 | b_tail = b_new; | |
1696 | while (b_tail->next != NULL) | |
1697 | b_tail = b_tail->next; | |
1698 | } | |
1699 | ||
1700 | /* add a badarch action to the end */ | |
1701 | b_badarch->prev = b_tail; | |
1702 | b_badarch->next = NULL; | |
1703 | b_tail->next = b_badarch; | |
1704 | b_tail = b_badarch; | |
1705 | ||
1706 | /* reset the state to the pseudo_arch for the final resolution */ | |
1707 | state->arch = &pseudo_arch; | |
1708 | ||
1709 | /* resolve any TGT_NXT jumps at the top level */ | |
1710 | b_iter = b_head; | |
1711 | do { | |
1712 | for (iter = 0; iter < b_iter->blk_cnt; iter++) { | |
1713 | i_iter = &b_iter->blks[iter]; | |
1714 | if (i_iter->jt.type == TGT_NXT) { | |
1715 | b_jmp = _gen_bpf_find_nxt(b_iter, | |
1716 | i_iter->jt.tgt.nxt); | |
1717 | if (b_jmp == NULL) | |
1718 | return -EFAULT; | |
1719 | i_iter->jt = _BPF_JMP_HSH(b_jmp->hash); | |
1720 | } | |
1721 | if (i_iter->jf.type == TGT_NXT) { | |
1722 | b_jmp = _gen_bpf_find_nxt(b_iter, | |
1723 | i_iter->jf.tgt.nxt); | |
1724 | if (b_jmp == NULL) | |
1725 | return -EFAULT; | |
1726 | i_iter->jf = _BPF_JMP_HSH(b_jmp->hash); | |
1727 | } | |
1728 | /* we shouldn't need to worry about a TGT_NXT in k */ | |
1729 | } | |
1730 | b_iter = b_iter->next; | |
1731 | } while (b_iter != NULL && b_iter->next != NULL); | |
1732 | ||
1733 | /* pull in all of the TGT_PTR_HSH jumps, one layer at a time */ | |
1734 | b_iter = b_tail; | |
1735 | do { | |
1736 | b_jmp = NULL; | |
1737 | /* look for jumps - backwards (shorter jumps) */ | |
1738 | for (iter = b_iter->blk_cnt - 1; | |
1739 | (iter >= 0) && (b_jmp == NULL); | |
1740 | iter--) { | |
1741 | i_iter = &b_iter->blks[iter]; | |
1742 | if (i_iter->jt.type == TGT_PTR_HSH) | |
1743 | b_jmp = _hsh_find_once(state, | |
1744 | i_iter->jt.tgt.hash); | |
1745 | if (b_jmp == NULL && i_iter->jf.type == TGT_PTR_HSH) | |
1746 | b_jmp = _hsh_find_once(state, | |
1747 | i_iter->jf.tgt.hash); | |
1748 | if (b_jmp == NULL && i_iter->k.type == TGT_PTR_HSH) | |
1749 | b_jmp = _hsh_find_once(state, | |
1750 | i_iter->k.tgt.hash); | |
1751 | if (b_jmp != NULL) { | |
1752 | /* do we need to reload the accumulator? */ | |
1753 | if ((b_jmp->acc_start.offset != -1) && | |
1754 | !_ACC_CMP_EQ(b_iter->acc_end, | |
1755 | b_jmp->acc_start)) { | |
1756 | if (b_jmp->acc_start.mask != ARG_MASK_MAX) { | |
1757 | _BPF_INSTR(instr, | |
1758 | _BPF_OP(state->arch, | |
1759 | BPF_ALU + BPF_AND), | |
1760 | _BPF_JMP_NO, | |
1761 | _BPF_JMP_NO, | |
1762 | _BPF_K(state->arch, | |
1763 | b_jmp->acc_start.mask)); | |
1764 | b_jmp = _blk_prepend(state, | |
1765 | b_jmp, | |
1766 | &instr); | |
1767 | if (b_jmp == NULL) | |
1768 | return -EFAULT; | |
1769 | } | |
1770 | _BPF_INSTR(instr, | |
1771 | _BPF_OP(state->arch, | |
1772 | BPF_LD + BPF_ABS), | |
1773 | _BPF_JMP_NO, _BPF_JMP_NO, | |
1774 | _BPF_K(state->arch, | |
1775 | b_jmp->acc_start.offset)); | |
1776 | b_jmp = _blk_prepend(state, | |
1777 | b_jmp, &instr); | |
1778 | if (b_jmp == NULL) | |
1779 | return -EFAULT; | |
1780 | /* not reliant on the accumulator */ | |
1781 | b_jmp->acc_start = _ACC_STATE_UNDEF; | |
1782 | } | |
1783 | /* insert the new block after this block */ | |
1784 | b_jmp->prev = b_iter; | |
1785 | b_jmp->next = b_iter->next; | |
1786 | b_iter->next = b_jmp; | |
1787 | if (b_jmp->next) | |
1788 | b_jmp->next->prev = b_jmp; | |
1789 | } | |
1790 | } | |
1791 | if (b_jmp != NULL) { | |
1792 | while (b_tail->next != NULL) | |
1793 | b_tail = b_tail->next; | |
1794 | b_iter = b_tail; | |
1795 | } else | |
1796 | b_iter = b_iter->prev; | |
1797 | } while (b_iter != NULL); | |
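| /* | |
|  * Editorial note on the accumulator reload above (illustration only): each | |
|  * block records what it expects in the accumulator on entry (acc_start) | |
|  * and what it leaves there on exit (acc_end).  If a spliced-in block | |
|  * expects something different from what the preceding block leaves behind, | |
|  * a fresh load - and, when acc_start.mask is narrower than ARG_MASK_MAX, a | |
|  * masking AND - is prepended, e.g. (pseudo-BPF): | |
|  * | |
|  *	ld  [acc_start.offset]      ; reload the expected argument word | |
|  *	and #acc_start.mask         ; only when a mask is in effect | |
|  * | |
|  * which matches the two _blk_prepend() calls in the loop above. | |
|  */ | |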
1798 | ||
1799 | ||
1800 | /* NOTE - from here to the end of the function we need to fail via the | |
1801 | * build_bpf_free_blks label, not just return an error; see the | |
1802 | * _gen_bpf_build_jmp() function for details */ | |
1803 | ||
1804 | /* check for long jumps and insert them if necessary; we also verify | |
1805 | * that all of our jump targets are valid at this point in the process */ | |
1806 | b_iter = b_tail; | |
1807 | do { | |
1808 | res_cnt = 0; | |
1809 | for (iter = b_iter->blk_cnt - 1; iter >= 0; iter--) { | |
1810 | i_iter = &b_iter->blks[iter]; | |
1811 | switch (i_iter->jt.type) { | |
1812 | case TGT_NONE: | |
1813 | case TGT_IMM: | |
1814 | break; | |
1815 | case TGT_PTR_HSH: | |
1816 | h_val = i_iter->jt.tgt.hash; | |
1817 | rc = _gen_bpf_build_jmp(state, b_tail, | |
1818 | b_iter, iter, | |
1819 | h_val); | |
1820 | if (rc < 0) | |
1821 | goto build_bpf_free_blks; | |
1822 | res_cnt += rc; | |
1823 | break; | |
1824 | default: | |
1825 | /* fatal error */ | |
1826 | goto build_bpf_free_blks; | |
1827 | } | |
1828 | switch (i_iter->jf.type) { | |
1829 | case TGT_NONE: | |
1830 | case TGT_IMM: | |
1831 | break; | |
1832 | case TGT_PTR_HSH: | |
1833 | h_val = i_iter->jf.tgt.hash; | |
1834 | rc = _gen_bpf_build_jmp(state, b_tail, | |
1835 | b_iter, iter, | |
1836 | h_val); | |
1837 | if (rc < 0) | |
1838 | goto build_bpf_free_blks; | |
1839 | res_cnt += rc; | |
1840 | break; | |
1841 | default: | |
1842 | /* fatal error */ | |
1843 | goto build_bpf_free_blks; | |
1844 | } | |
1845 | } | |
1846 | if (res_cnt == 0) | |
1847 | b_iter = b_iter->prev; | |
1848 | } while (b_iter != NULL); | |
1849 | ||
1850 | /* build the bpf program */ | |
1851 | do { | |
1852 | b_iter = b_head; | |
1853 | /* resolve the TGT_PTR_HSH jumps */ | |
1854 | for (iter = 0; iter < b_iter->blk_cnt; iter++) { | |
1855 | i_iter = &b_iter->blks[iter]; | |
1856 | if (i_iter->jt.type == TGT_PTR_HSH) { | |
1857 | h_val = i_iter->jt.tgt.hash; | |
1858 | jmp_len = b_iter->blk_cnt - (iter + 1); | |
1859 | b_jmp = b_iter->next; | |
1860 | while (b_jmp != NULL && b_jmp->hash != h_val) { | |
1861 | jmp_len += b_jmp->blk_cnt; | |
1862 | b_jmp = b_jmp->next; | |
1863 | } | |
1864 | if (b_jmp == NULL || jmp_len > _BPF_JMP_MAX) | |
1865 | goto build_bpf_free_blks; | |
1866 | i_iter->jt = _BPF_JMP_IMM(jmp_len); | |
1867 | } | |
1868 | if (i_iter->jf.type == TGT_PTR_HSH) { | |
1869 | h_val = i_iter->jf.tgt.hash; | |
1870 | jmp_len = b_iter->blk_cnt - (iter + 1); | |
1871 | b_jmp = b_iter->next; | |
1872 | while (b_jmp != NULL && b_jmp->hash != h_val) { | |
1873 | jmp_len += b_jmp->blk_cnt; | |
1874 | b_jmp = b_jmp->next; | |
1875 | } | |
1876 | if (b_jmp == NULL || jmp_len > _BPF_JMP_MAX) | |
1877 | goto build_bpf_free_blks; | |
1878 | i_iter->jf = _BPF_JMP_IMM(jmp_len); | |
1879 | } | |
1880 | if (i_iter->k.type == TGT_PTR_HSH) { | |
1881 | h_val = i_iter->k.tgt.hash; | |
1882 | jmp_len = b_iter->blk_cnt - (iter + 1); | |
1883 | b_jmp = b_tail; | |
1884 | while (b_jmp->hash != h_val) | |
1885 | b_jmp = b_jmp->prev; | |
1886 | b_jmp = b_jmp->prev; | |
1887 | while (b_jmp != b_iter) { | |
1888 | jmp_len += b_jmp->blk_cnt; | |
1889 | b_jmp = b_jmp->prev; | |
1890 | } | |
1891 | if (b_jmp == NULL) | |
1892 | goto build_bpf_free_blks; | |
1893 | i_iter->k = _BPF_K(state->arch, jmp_len); | |
1894 | } | |
1895 | } | |
1896 | ||
1897 | /* append this block to the final bpf program */ | |
1898 | if (_bpf_append_blk(state->bpf, b_iter) < 0) | |
1899 | goto build_bpf_free_blks; | |
1900 | ||
1901 | /* we're done with the block, free it */ | |
1902 | b_head = b_iter->next; | |
1903 | _blk_free(state, b_iter); | |
1904 | } while (b_head != NULL); | |
1905 | ||
1906 | return 0; | |
1907 | ||
1908 | build_bpf_free_blks: | |
1909 | b_iter = b_head; | |
1910 | while (b_iter != NULL) { | |
1911 | b_jmp = b_iter->next; | |
1912 | _hsh_remove(state, b_iter->hash); | |
1913 | __blk_free(state, b_iter); | |
1914 | b_iter = b_jmp; | |
1915 | } | |
1916 | return -EFAULT; | |
1917 | } | |
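| /* | |
|  * Editorial note (illustration only, not part of the original source): the | |
|  * program assembled above always begins with the architecture load emitted | |
|  * as b_head, followed by the per-architecture filters, with the badarch | |
|  * action appended at the tail and the shared action/return blocks spliced | |
|  * in after the blocks that reference them.  In pseudo-BPF the skeleton is | |
|  * roughly: | |
|  * | |
|  *	ld  [arch]                           ; b_head, offset _BPF_OFFSET_ARCH | |
|  *	jeq #AUDIT_ARCH_<abi>, L_abi, L_nxt  ; start of each per-arch filter | |
|  *	...                                  ; syscall/argument checks | |
|  *	ret #act_badarch                     ; b_badarch, appended at the tail | |
|  */ | |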
1918 | ||
1919 | /** | |
1920 | * Generate a BPF representation of the filter DB | |
1921 | * @param col the seccomp filter collection | |
1922 | * | |
1923 | * This function generates a BPF representation of the given filter collection. | |
1924 | * Returns a pointer to a valid bpf_program on success, NULL on failure. | |
1925 | * | |
1926 | */ | |
1927 | struct bpf_program *gen_bpf_generate(const struct db_filter_col *col) | |
1928 | { | |
1929 | int rc; | |
1930 | struct bpf_state state; | |
1931 | ||
1932 | memset(&state, 0, sizeof(state)); | |
1933 | state.attr = &col->attr; | |
1934 | ||
1935 | state.bpf = malloc(sizeof(*(state.bpf))); | |
1936 | if (state.bpf == NULL) | |
1937 | return NULL; | |
1938 | memset(state.bpf, 0, sizeof(*(state.bpf))); | |
1939 | ||
1940 | rc = _gen_bpf_build_bpf(&state, col); | |
1941 | if (rc < 0) | |
1942 | goto bpf_generate_end; | |
1943 | ||
1944 | bpf_generate_end: | |
1945 | if (rc < 0) | |
1946 | _state_release(&state); | |
1947 | return state.bpf; | |
1948 | } | |
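| /* | |
|  * Usage sketch (editorial, not part of the original source): a caller such | |
|  * as the library's filter-load path would typically wrap the generated | |
|  * program in a struct sock_fprog and hand it to the kernel, then release | |
|  * it.  The blk_cnt/blks field names are taken from the bpf_program | |
|  * definition in gen_bpf.h, and the prctl() sequence is a simplified sketch | |
|  * of what sys_filter_load() does, not a verbatim copy: | |
|  * | |
|  *	struct bpf_program *prgm = gen_bpf_generate(col); | |
|  *	if (prgm == NULL) | |
|  *		return -ENOMEM; | |
|  *	struct sock_fprog fprog = { | |
|  *		.len = prgm->blk_cnt, | |
|  *		.filter = (struct sock_filter *)prgm->blks, | |
|  *	}; | |
|  *	prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); | |
|  *	rc = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &fprog); | |
|  *	gen_bpf_release(prgm); | |
|  */ | |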
1949 | ||
1950 | /** | |
1951 | * Free memory associated with a BPF representation | |
1952 | * @param program the BPF representation | |
1953 | * | |
1954 | * Free the memory associated with a BPF representation generated by the | |
1955 | * gen_bpf_generate() function. | |
1956 | * | |
1957 | */ | |
1958 | void gen_bpf_release(struct bpf_program *program) | |
1959 | { | |
1960 | _program_free(program); | |
1961 | } |