//------------------------------------------------------------------------------
// Copyright (c) 2016 by Lukasz Janyst <lukasz@jany.st>
//------------------------------------------------------------------------------
// This file is part of thread-bites.
//
// thread-bites is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// thread-bites is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with thread-bites. If not, see <http://www.gnu.org/licenses/>.
//------------------------------------------------------------------------------

#include "tb.h"
#include "tb-private.h"

#include <limits.h>
#include <stdint.h>
#include <string.h>
#include <linux/sched.h>
#include <linux/mman.h>
#include <asm-generic/mman-common.h>
#include <asm-generic/param.h>
#include <linux/futex.h>
#include <asm/prctl.h>

//------------------------------------------------------------------------------
// Prototypes and globals
//------------------------------------------------------------------------------
static void release_descriptor(tbthread_t desc);
static struct tbthread *get_descriptor();
tbthread_mutex_t desc_mutex = TBTHREAD_MUTEX_INITIALIZER;
int tb_pid = 0;

//------------------------------------------------------------------------------
// Initialize threading
//------------------------------------------------------------------------------
static void *glibc_thread_desc;
void tbthread_init()
{
  glibc_thread_desc = tbthread_self();
  tbthread_t thread = malloc(sizeof(struct tbthread));
  memset(thread, 0, sizeof(struct tbthread));
  thread->self = thread;
  thread->sched_info = SCHED_INFO_PACK(SCHED_NORMAL, 0);
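  // Point the FS segment register at the new descriptor so that tbthread_self()
  // returns it for the main thread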
  SYSCALL2(__NR_arch_prctl, ARCH_SET_FS, thread);
  tb_pid = SYSCALL0(__NR_getpid);
  thread->tid = tb_pid;

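  // Install the handler for SIGCANCEL so that other threads can interrupt us
  // with a cancellation request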
  struct sigaction sa;
  memset(&sa, 0, sizeof(struct sigaction));
  sa.sa_handler = (__sighandler_t)tb_cancel_handler;
  sa.sa_flags = SA_SIGINFO;
  tbsigaction(SIGCANCEL, &sa, 0);
}

//------------------------------------------------------------------------------
// Finalize threading
//------------------------------------------------------------------------------
void tbthread_finit()
{
  free(tbthread_self());
  SYSCALL2(__NR_arch_prctl, ARCH_SET_FS, glibc_thread_desc);
}

//------------------------------------------------------------------------------
// Init the attrs to the defaults
//------------------------------------------------------------------------------
void tbthread_attr_init(tbthread_attr_t *attr)
{
  memset(attr, 0, sizeof(tbthread_attr_t));
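  // 8 MiB - the usual default thread stack size on 64-bit Linux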
  attr->stack_size = 8192 * 1024;
  attr->joinable = 1;
  attr->sched_inherit = TBTHREAD_INHERIT_SCHED;
}

int tbthread_attr_setdetachstate(tbthread_attr_t *attr, int state)
{
  if(state == TBTHREAD_CREATE_DETACHED)
    attr->joinable = 0;
  else
    attr->joinable = 1;
  return 0;
}

//------------------------------------------------------------------------------
// Thread function wrapper
//------------------------------------------------------------------------------
static int start_thread(void *arg)
{
  tbthread_t th = (tbthread_t)arg;

  //----------------------------------------------------------------------------
  // Wait until we can run the user function
  //----------------------------------------------------------------------------
  if(th->start_status != TB_START_OK) {
    SYSCALL3(__NR_futex, &th->start_status, FUTEX_WAIT, TB_START_WAIT);
    if(th->start_status == TB_START_EXIT)
      SYSCALL1(__NR_exit, 0);
  }

  //----------------------------------------------------------------------------
  // Run the user function
  //----------------------------------------------------------------------------
  void *ret = th->fn(th->arg);
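  // The user function returned normally: disable cancellation and drop any
  // cleanup handlers that were not popped before terminating the thread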
  tbthread_setcancelstate(TBTHREAD_CANCEL_DISABLE, 0);
  tb_clear_cleanup_handlers();
  tbthread_exit(ret);
  return 0;
}

//------------------------------------------------------------------------------
// Terminate the current thread
//------------------------------------------------------------------------------
void tbthread_exit(void *retval)
{
  tbthread_t th = tbthread_self();
  uint32_t stack_size = th->stack_size;
  void *stack = th->stack;
  int free_desc = 0;

  th->retval = retval;
  tb_call_cleanup_handlers();
  tb_tls_call_destructors();

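  // If the thread is detached, nobody will join it, so we have to release the
  // descriptor ourselves; fixing the join status stops a concurrent
  // tbthread_detach() from changing it under us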
  tbthread_mutex_lock(&desc_mutex);
  if(th->join_status == TB_DETACHED)
    free_desc = 1;
  th->join_status = TB_JOINABLE_FIXED;
  tbthread_mutex_unlock(&desc_mutex);

  if(free_desc)
    release_descriptor(th);

  //----------------------------------------------------------------------------
  // Free the stack and exit. This needs to be done in assembly: we are about to
  // unmap the stack from underneath our own feet, so we cannot allow any C code
  // to write to it anymore.
  //----------------------------------------------------------------------------
  register long a1 asm("rdi") = (long)stack;
  register long a2 asm("rsi") = stack_size;
  asm volatile(
    "syscall\n\t"
    "movq $60, %%rax\n\t" // 60 = __NR_exit
    "movq $0, %%rdi\n\t"
    "syscall"
    :
    : "a" (__NR_munmap), "r" (a1), "r" (a2)
    : "memory", "cc", "r11", "cx");
}

//------------------------------------------------------------------------------
// Wait for exit
//------------------------------------------------------------------------------
static void wait_for_thread(tbthread_t thread)
{
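  // The thread was created with CLONE_CHILD_CLEARTID, so the kernel zeroes
  // thread->tid and does a FUTEX_WAKE on it when the thread exits - waiting on
  // the old tid value is therefore enough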
  uint32_t tid = thread->tid;
  long ret = 0;
  if(tid != 0)
    do {
      ret = SYSCALL3(__NR_futex, &thread->tid, FUTEX_WAIT, tid);
    } while(ret != -EWOULDBLOCK && ret != 0);
}

//------------------------------------------------------------------------------
// Descriptor lists
//------------------------------------------------------------------------------
list_t used_desc;
static list_t free_desc;

//------------------------------------------------------------------------------
// Get a descriptor
//------------------------------------------------------------------------------
static struct tbthread *get_descriptor()
{
  tbthread_t desc = 0;
  list_t *node = 0;

  //----------------------------------------------------------------------------
  // Try to re-use a thread descriptor and make sure that the corresponding
  // thread has actually exited
  //----------------------------------------------------------------------------
  tbthread_mutex_lock(&desc_mutex);
  node = free_desc.next;
  if(node)
    list_rm(node);
  tbthread_mutex_unlock(&desc_mutex);

  if(node) {
    desc = (tbthread_t)node->element;
    wait_for_thread(desc);
  }

  //----------------------------------------------------------------------------
  // There are no free descriptors, so we allocate a new one
  //----------------------------------------------------------------------------
  if(!node) {
    desc = malloc(sizeof(struct tbthread));
    node = malloc(sizeof(list_t));
    node->element = desc;
  }

  tbthread_mutex_lock(&desc_mutex);
  list_add(&used_desc, node, 0);
  tbthread_mutex_unlock(&desc_mutex);
  return desc;
}

//------------------------------------------------------------------------------
// Release a descriptor
//------------------------------------------------------------------------------
static void release_descriptor(tbthread_t desc)
{
  tbthread_mutex_lock(&desc_mutex);
  list_t *node = list_find_elem(&used_desc, desc);
  if(!node) {
    tbprint("Releasing unknown descriptor: 0x%llx! Scared and confused!\n",
            desc);
    tbthread_mutex_unlock(&desc_mutex);
    return;
  }
  list_rm(node);
  list_add(&free_desc, node, 0);
  tbthread_mutex_unlock(&desc_mutex);
}

//------------------------------------------------------------------------------
// Spawn a thread
//------------------------------------------------------------------------------
int tbthread_create(
  tbthread_t            *thread,
  const tbthread_attr_t *attr,
  void                  *(*f)(void *),
  void                  *arg)
{
  int ret = 0;
  *thread = 0;

  //----------------------------------------------------------------------------
  // Allocate the stack with a guard page at the end so that a stack overflow
  // results in a SIGSEGV rather than in silent memory corruption
  //----------------------------------------------------------------------------
  void *stack = tbmmap(NULL, attr->stack_size, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  long status = (long)stack;
  if(status < 0)
    return status;

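  // The stack grows downwards, so the guard page goes at the lowest address of
  // the mapping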
  status = SYSCALL3(__NR_mprotect, stack, EXEC_PAGESIZE, PROT_NONE);
  if(status < 0) {
    ret = status;
    goto error;
  }

  //----------------------------------------------------------------------------
  // Pack everything up
  //----------------------------------------------------------------------------
  *thread = get_descriptor();
  memset(*thread, 0, sizeof(struct tbthread));
  (*thread)->self = *thread;
  (*thread)->stack = stack;
  (*thread)->stack_size = attr->stack_size;
  (*thread)->fn = f;
  (*thread)->arg = arg;
  (*thread)->join_status = attr->joinable;
  (*thread)->cancel_status = TB_CANCEL_ENABLED | TB_CANCEL_DEFERRED;

  //----------------------------------------------------------------------------
  // If we are to set a scheduling policy, the new thread needs to go to sleep
  // immediately after it starts so that we can set the policy before the user
  // function gets to execute
  //----------------------------------------------------------------------------
  if(!attr->sched_inherit)
    (*thread)->start_status = TB_START_WAIT;
  else {
    tbthread_t self = tbthread_self();
    (*thread)->sched_info = self->sched_info;
  }

  //----------------------------------------------------------------------------
  // Spawn the thread
  //----------------------------------------------------------------------------
  int flags = CLONE_VM | CLONE_FS | CLONE_FILES | CLONE_SYSVSEM | CLONE_SIGHAND;
  flags |= CLONE_THREAD | CLONE_SETTLS;
  flags |= CLONE_CHILD_SETTID | CLONE_CHILD_CLEARTID;
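  // CLONE_CHILD_SETTID makes the kernel store the new TID at &(*thread)->tid,
  // and CLONE_CHILD_CLEARTID makes it zero that word and FUTEX_WAKE it when the
  // thread exits - this is what wait_for_thread() relies on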

  int tid = tbclone(start_thread, *thread, flags, stack+attr->stack_size,
                    0, &(*thread)->tid, *thread);
  if(tid < 0) {
    ret = tid;
    goto error;
  }

  //----------------------------------------------------------------------------
  // We may start waiting on this futex before the kernel manages to store
  // anything meaningful in the tid field, so we set it here ourselves as well
  //----------------------------------------------------------------------------
  (*thread)->tid = tid;

  //----------------------------------------------------------------------------
  // Set the scheduling policy. If we succeed, we let the thread run; if not,
  // we tell it to exit and wait for it.
  //----------------------------------------------------------------------------
  if(!attr->sched_inherit) {
    ret = tb_set_sched(*thread, attr->sched_policy, attr->sched_priority);

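    // Wake the new thread: depending on the outcome above it will either run
    // the user function or exit immediately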
    if(ret) (*thread)->start_status = TB_START_EXIT;
    else (*thread)->start_status = TB_START_OK;
    SYSCALL3(__NR_futex, &(*thread)->start_status, FUTEX_WAKE, 1);

    if(ret) {
      wait_for_thread(*thread);
      goto error;
    }
  }
  return 0;

error:
  tbmunmap(stack, attr->stack_size);
  if(*thread)
    release_descriptor(*thread);
  return ret;
}

//------------------------------------------------------------------------------
// Detach a thread
//------------------------------------------------------------------------------
int tbthread_detach(tbthread_t thread)
{
  int ret = 0;

  tbthread_mutex_lock(&desc_mutex);
  if(!list_find_elem(&used_desc, thread)) {
    ret = -ESRCH;
    goto exit;
  }

  if(thread->join_status == TB_JOINABLE_FIXED) {
    ret = -EINVAL;
    goto exit;
  }

  thread->join_status = TB_DETACHED;
exit:
  tbthread_mutex_unlock(&desc_mutex);
  return ret;
}

//------------------------------------------------------------------------------
// Join a thread
//------------------------------------------------------------------------------
int tbthread_join(tbthread_t thread, void **retval)
{
  tbthread_t self = tbthread_self();
  int ret = 0;

  //----------------------------------------------------------------------------
  // Check if the thread may be joined
  //----------------------------------------------------------------------------
  tbthread_mutex_lock(&desc_mutex);

  if(thread == self) {
    ret = -EDEADLK;
    goto error;
  }

  if(!list_find_elem(&used_desc, thread)) {
    ret = -ESRCH;
    goto error;
  }

  if(thread->join_status == TB_DETACHED) {
    ret = -EINVAL;
    goto error;
  }

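  // The target thread is itself waiting to join us, so joining it back would
  // deadlock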
  if(self->joiner == thread) {
    ret = -EDEADLK;
    goto error;
  }

  if(thread->joiner) {
    ret = -EINVAL;
    goto error;
  }

  thread->join_status = TB_JOINABLE_FIXED;
  thread->joiner = self;

  //----------------------------------------------------------------------------
  // We can release the lock now: we have become responsible for releasing the
  // thread descriptor, so it is not going to go away under us
  //----------------------------------------------------------------------------
  tbthread_mutex_unlock(&desc_mutex);

  wait_for_thread(thread);
  if(retval)
    *retval = thread->retval;
  release_descriptor(thread);
  return 0;

error:
  tbthread_mutex_unlock(&desc_mutex);
  return ret;
}

//------------------------------------------------------------------------------
// Thread equal
//------------------------------------------------------------------------------
int tbthread_equal(tbthread_t t1, tbthread_t t2)
{
  return t1 == t2;
}

//------------------------------------------------------------------------------
// Once cancel cleanup
//------------------------------------------------------------------------------
static void once_cleanup(void *arg)
{
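  // The initializer was cancelled: reset the once control so that another
  // thread can retry, and wake everybody who is waiting for the result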
  tbthread_once_t *once = (tbthread_once_t *)arg;
  *once = TB_ONCE_NEW;
  SYSCALL3(__NR_futex, once, FUTEX_WAKE, INT_MAX);
}

//------------------------------------------------------------------------------
// Run the code once
//------------------------------------------------------------------------------
int tbthread_once(tbthread_once_t *once, void (*func)(void))
{
  if(!once || !func)
    return -EINVAL;

  int cancel_state;

  while(1) {
    if(*once == TB_ONCE_DONE)
      return 0;

    //--------------------------------------------------------------------------
    // The executor
    //--------------------------------------------------------------------------
    tbthread_setcancelstate(TBTHREAD_CANCEL_DISABLE, &cancel_state);
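    // Only one thread wins the atomic NEW -> IN_PROGRESS transition and gets to
    // run the initializer; the cleanup handler makes sure the once control is
    // reset if we get cancelled while doing so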
    if(__sync_bool_compare_and_swap(once, TB_ONCE_NEW, TB_ONCE_IN_PROGRESS)) {
      tbthread_cleanup_push(once_cleanup, once);
      tbthread_setcancelstate(cancel_state, 0);

      (*func)();

      tbthread_setcancelstate(TBTHREAD_CANCEL_DISABLE, &cancel_state);
      tbthread_cleanup_pop(0);

      *once = TB_ONCE_DONE;
      SYSCALL3(__NR_futex, once, FUTEX_WAKE, INT_MAX);
      tbthread_setcancelstate(cancel_state, 0);
      return 0;
    }

    tbthread_setcancelstate(cancel_state, 0);

    //--------------------------------------------------------------------------
    // The waiters
    //--------------------------------------------------------------------------
    while(1) {
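      // FUTEX_WAIT returns immediately if *once no longer holds
      // TB_ONCE_IN_PROGRESS, so we cannot miss the wake-up; re-checking the
      // value guards against spurious returns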
      SYSCALL3(__NR_futex, once, FUTEX_WAIT, TB_ONCE_IN_PROGRESS);
      if(*once != TB_ONCE_IN_PROGRESS)
        break;
    }
  }
}