/* Copyright (C) 2009-2013 Free Software Foundation, Inc.
   This file is part of the GNU C Library.
   Contributed by Ulrich Drepper <drepper@redhat.com>, 2009.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <http://www.gnu.org/licenses/>.  */

#include <sysdep.h>
#include <tcb-offsets.h>
#include <kernel-features.h>
#include "lowlevellock.h"

#ifdef IS_IN_libpthread
# ifdef SHARED
#  define __pthread_unwind __GI___pthread_unwind
# endif
#else
# ifndef SHARED
	.weak __pthread_unwind
# endif
#endif


#ifdef __ASSUME_PRIVATE_FUTEX
# define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	$(FUTEX_WAIT | FUTEX_PRIVATE_FLAG), reg
#else
# if FUTEX_WAIT == 0
#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	%fs:PRIVATE_FUTEX, reg
# else
#  define LOAD_PRIVATE_FUTEX_WAIT(reg) \
	movl	%fs:PRIVATE_FUTEX, reg ; \
	orl	$FUTEX_WAIT, reg
# endif
#endif

/* It is crucial that the functions in this file don't modify registers
   other than %rax and %r11.  The syscall wrapper code depends on this
   because it doesn't explicitly save the other registers which hold
   relevant values.  */
	.text

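/* __pthread_enable_asynccancel:
   Atomically set the asynchronous cancellation type bit in the
   thread's cancelhandling word and return the previous value in
   %eax (the caller later passes it to __pthread_disable_asynccancel).
   If a cancellation request is already pending and may be acted
   upon, unwind immediately instead of returning.  */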
	.hidden __pthread_enable_asynccancel
ENTRY(__pthread_enable_asynccancel)
	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	orl	$TCB_CANCELTYPE_BITMASK, %r11d
	cmpl	%eax, %r11d
	je	1f

	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

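	/* The type bit is now set.  If the resulting state shows a
	   pending cancellation that is enabled and not yet acted upon
	   (only CANCELTYPE and CANCELED set among the tested bits),
	   act on it right away.  */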
	andl	$(TCB_CANCELSTATE_BITMASK|TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK|TCB_EXITING_BITMASK|TCB_CANCEL_RESTMASK|TCB_TERMINATED_BITMASK), %r11d
	cmpl	$(TCB_CANCELTYPE_BITMASK|TCB_CANCELED_BITMASK), %r11d
	je	3f

1:	ret

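	/* Deliver the pending cancellation: realign the stack for the
	   call, record PTHREAD_CANCELED as the thread result, mark the
	   thread as exiting and unwind.  __pthread_unwind does not
	   return.  */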
3:	subq	$8, %rsp
	cfi_adjust_cfa_offset(8)
	LP_OP(mov) $TCB_PTHREAD_CANCELED, %fs:RESULT
	lock
	orl	$TCB_EXITING_BITMASK, %fs:CANCELHANDLING
	mov	%fs:CLEANUP_JMP_BUF, %RDI_LP
#ifdef SHARED
	call	__pthread_unwind@PLT
#else
	call	__pthread_unwind
#endif
	hlt
END(__pthread_enable_asynccancel)


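/* __pthread_disable_asynccancel:
   %edi contains the cancelhandling value previously returned by
   __pthread_enable_asynccancel.  If asynchronous cancellation was
   already enabled at that point, there is nothing to restore.
   Otherwise clear the type bit again and, if a cancellation request
   is currently being delivered, wait until it has been registered.  */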
	.hidden __pthread_disable_asynccancel
ENTRY(__pthread_disable_asynccancel)
	testl	$TCB_CANCELTYPE_BITMASK, %edi
	jnz	1f

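	/* Clear the asynchronous cancellation type bit again with a
	   CAS loop on cancelhandling.  */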
	movl	%fs:CANCELHANDLING, %eax
2:	movl	%eax, %r11d
	andl	$~TCB_CANCELTYPE_BITMASK, %r11d
	lock
	cmpxchgl %r11d, %fs:CANCELHANDLING
	jnz	2b

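	/* If CANCELING is set but CANCELED is not, a pthread_cancel
	   call is currently delivering the request; wait below until
	   it has been fully registered before returning.  */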
	movl	%r11d, %eax
3:	andl	$(TCB_CANCELING_BITMASK|TCB_CANCELED_BITMASK), %eax
	cmpl	$TCB_CANCELING_BITMASK, %eax
	je	4f
1:	ret

	/* Performance doesn't matter in this loop.  We will delay
	   until the thread is canceled, and it is unlikely that we
	   ever enter the loop twice.  */
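	/* futex syscall: %rdi = &self->cancelhandling,
	   %esi = FUTEX_WAIT (private operation if available),
	   %r10 = NULL timeout.  After being woken, re-read
	   cancelhandling and re-check the cancel bits.  */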
4:	mov	%fs:0, %RDI_LP
	movl	$__NR_futex, %eax
	xorq	%r10, %r10
	addq	$CANCELHANDLING, %rdi
	LOAD_PRIVATE_FUTEX_WAIT (%esi)
	syscall
	movl	%fs:CANCELHANDLING, %eax
	jmp	3b
END(__pthread_disable_asynccancel)