SOURCES: glibc-nptl.patch (NEW) - nptl update from glibc 2.5
arekm
arekm at pld-linux.org
Sun Dec 17 02:12:28 CET 2006
Author: arekm Date: Sun Dec 17 01:12:28 2006 GMT
Module: SOURCES Tag: HEAD
---- Log message:
- nptl update from glibc 2.5
---- Files affected:
SOURCES:
glibc-nptl.patch (NONE -> 1.1) (NEW)
---- Diffs:
================================================================
Index: SOURCES/glibc-nptl.patch
diff -u /dev/null SOURCES/glibc-nptl.patch:1.1
--- /dev/null Sun Dec 17 02:12:28 2006
+++ SOURCES/glibc-nptl.patch Sun Dec 17 02:12:23 2006
@@ -0,0 +1,22183 @@
+diff -urN libc-2.3/nptl/allocatestack.c libc-2.5/nptl/allocatestack.c
+--- libc-2.3/nptl/allocatestack.c 2005-04-09 15:15:26.000000000 +0200
++++ libc-2.5/nptl/allocatestack.c 2006-11-12 22:43:59.683875000 +0100
+@@ -1,4 +1,4 @@
+-/* Copyright (C) 2002, 2003, 2004 Free Software Foundation, Inc.
++/* Copyright (C) 2002, 2003, 2004, 2005, 2006 Free Software Foundation, Inc.
+ This file is part of the GNU C Library.
+ Contributed by Ulrich Drepper <drepper at redhat.com>, 2002.
+
+@@ -33,7 +33,7 @@
+ #ifndef NEED_SEPARATE_REGISTER_STACK
+
+ /* Most architectures have exactly one stack pointer. Some have more. */
+-# define STACK_VARIABLES void *stackaddr
++# define STACK_VARIABLES void *stackaddr = NULL
+
+ /* How to pass the values to the 'create_thread' function. */
+ # define STACK_VARIABLES_ARGS stackaddr
+@@ -52,7 +52,7 @@
+
+ /* We need two stacks. The kernel will place them but we have to tell
+ the kernel about the size of the reserved address space. */
+-# define STACK_VARIABLES void *stackaddr; size_t stacksize
++# define STACK_VARIABLES void *stackaddr = NULL; size_t stacksize = 0
+
+ /* How to pass the values to the 'create_thread' function. */
+ # define STACK_VARIABLES_ARGS stackaddr, stacksize
+@@ -211,6 +211,45 @@
+ }
+
+
++/* Free stacks until cache size is lower than LIMIT. */
++static void
++free_stacks (size_t limit)
++{
++ /* We reduce the size of the cache. Remove the last entries until
++ the size is below the limit. */
++ list_t *entry;
++ list_t *prev;
++
++ /* Search from the end of the list. */
++ list_for_each_prev_safe (entry, prev, &stack_cache)
++ {
++ struct pthread *curr;
++
++ curr = list_entry (entry, struct pthread, list);
++ if (FREE_P (curr))
++ {
++ /* Unlink the block. */
++ list_del (entry);
++
++ /* Account for the freed memory. */
++ stack_cache_actsize -= curr->stackblock_size;
++
++ /* Free the memory associated with the ELF TLS. */
++ _dl_deallocate_tls (TLS_TPADJ (curr), false);
++
++ /* Remove this block. This should never fail. If it does
++ something is really wrong. */
++ if (munmap (curr->stackblock, curr->stackblock_size) != 0)
++ abort ();
++
++ /* Maybe we have freed enough. */
++ if (stack_cache_actsize <= limit)
++ break;
++ }
++ }
++}
++
++
+ /* Add a stack frame which is not used anymore to the stack. Must be
+ called with the cache lock held. */
+ static inline void
+@@ -224,40 +263,15 @@
+
+ stack_cache_actsize += stack->stackblock_size;
+ if (__builtin_expect (stack_cache_actsize > stack_cache_maxsize, 0))
+- {
+- /* We reduce the size of the cache. Remove the last entries
+- until the size is below the limit. */
+- list_t *entry;
+- list_t *prev;
+-
+- /* Search from the end of the list. */
+- list_for_each_prev_safe (entry, prev, &stack_cache)
+- {
+- struct pthread *curr;
+-
+- curr = list_entry (entry, struct pthread, list);
+- if (FREE_P (curr))
+- {
+- /* Unlink the block. */
+- list_del (entry);
+-
+- /* Account for the freed memory. */
+- stack_cache_actsize -= curr->stackblock_size;
++ free_stacks (stack_cache_maxsize);
++}
+
+- /* Free the memory associated with the ELF TLS. */
+- _dl_deallocate_tls (TLS_TPADJ (curr), false);
+
+- /* Remove this block. This should never fail. If it
+- does something is really wrong. */
+- if (munmap (curr->stackblock, curr->stackblock_size) != 0)
+- abort ();
+-
+- /* Maybe we have freed enough. */
+- if (stack_cache_actsize <= stack_cache_maxsize)
+- break;
+- }
+- }
+- }
++/* This function is called indirectly from the freeres code in libc. */
++void
++__free_stack_cache (void)
++{
++ free_stacks (0);
+ }
+
+
+@@ -406,8 +420,9 @@
+ /* Make sure the size of the stack is enough for the guard and
+ eventually the thread descriptor. */
+ guardsize = (attr->guardsize + pagesize_m1) & ~pagesize_m1;
+- if (__builtin_expect (size < (guardsize + __static_tls_size
+- + MINIMAL_REST_STACK + pagesize_m1 + 1),
++ if (__builtin_expect (size < ((guardsize + __static_tls_size
++ + MINIMAL_REST_STACK + pagesize_m1)
++ & ~pagesize_m1),
+ 0))
+ /* The stack is too small (or the guard too large). */
+ return EINVAL;
+@@ -621,6 +636,18 @@
+ stillborn thread could be canceled while the lock is taken. */
+ pd->lock = LLL_LOCK_INITIALIZER;
+
++ /* The robust mutex lists also need to be initialized
++ unconditionally because the cleanup for the previous stack owner
++ might have happened in the kernel. */
++ pd->robust_head.futex_offset = (offsetof (pthread_mutex_t, __data.__lock)
++ - offsetof (pthread_mutex_t,
++ __data.__list.__next));
++ pd->robust_head.list_op_pending = NULL;
++#ifdef __PTHREAD_MUTEX_HAVE_PREV
++ pd->robust_prev = &pd->robust_head;
++#endif
++ pd->robust_head.list = &pd->robust_head;
++
+ /* We place the thread descriptor at the end of the stack. */
+ *pdp = pd;
+
+@@ -729,9 +756,7 @@
+ list_t *runp;
+ list_for_each (runp, &stack_used)
+ {
+- struct pthread *curp;
+-
+- curp = list_entry (runp, struct pthread, list);
++ struct pthread *curp = list_entry (runp, struct pthread, list);
+ if (curp != self)
+ {
+ /* This marks the stack as free. */
+@@ -745,6 +770,13 @@
+ }
+ }
+
++ /* Reset the PIDs in any cached stacks. */
++ list_for_each (runp, &stack_cache)
++ {
++ struct pthread *curp = list_entry (runp, struct pthread, list);
++ curp->pid = self->pid;
++ }
++
+ /* Add the stack of all running threads to the cache. */
+ list_splice (&stack_used, &stack_cache);
+
+@@ -817,6 +849,46 @@
+ }
+ #endif
+
++
++static void
++internal_function
++setxid_signal_thread (struct xid_command *cmdp, struct pthread *t)
++{
++ if (! IS_DETACHED (t))
++ {
++ int ch;
++ do
++ {
++ ch = t->cancelhandling;
++
++ /* If the thread is exiting right now, ignore it. */
++ if ((ch & EXITING_BITMASK) != 0)
++ return;
++ }
++ while (atomic_compare_and_exchange_bool_acq (&t->cancelhandling,
++ ch | SETXID_BITMASK, ch));
++ }
++
++ int val;
++ INTERNAL_SYSCALL_DECL (err);
++#if __ASSUME_TGKILL
++ val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
++ t->tid, SIGSETXID);
++#else
++# ifdef __NR_tgkill
++ val = INTERNAL_SYSCALL (tgkill, err, 3, THREAD_GETMEM (THREAD_SELF, pid),
++ t->tid, SIGSETXID);
++ if (INTERNAL_SYSCALL_ERROR_P (val, err)
++ && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
++# endif
++ val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
++#endif
++
++ if (!INTERNAL_SYSCALL_ERROR_P (val, err))
++ atomic_increment (&cmdp->cntr);
++}
++
++
+ int
+ attribute_hidden
+ __nptl_setxid (struct xid_command *cmdp)
+@@ -827,8 +899,6 @@
+ __xidcmd = cmdp;
+ cmdp->cntr = 0;
+
+- INTERNAL_SYSCALL_DECL (err);
+-
+ struct pthread *self = THREAD_SELF;
+
+ /* Iterate over the list with system-allocated threads first. */
+@@ -836,54 +906,20 @@
+ list_for_each (runp, &stack_used)
+ {
+ struct pthread *t = list_entry (runp, struct pthread, list);
+- if (t != self)
+- {
+- int val;
+-#if __ASSUME_TGKILL
+- val = INTERNAL_SYSCALL (tgkill, err, 3,
+- THREAD_GETMEM (THREAD_SELF, pid),
+- t->tid, SIGSETXID);
+-#else
+-# ifdef __NR_tgkill
+- val = INTERNAL_SYSCALL (tgkill, err, 3,
+- THREAD_GETMEM (THREAD_SELF, pid),
+- t->tid, SIGSETXID);
+- if (INTERNAL_SYSCALL_ERROR_P (val, err)
+- && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
+-# endif
+- val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
+-#endif
++ if (t == self)
++ continue;
+
+- if (!INTERNAL_SYSCALL_ERROR_P (val, err))
+- atomic_increment (&cmdp->cntr);
+- }
++ setxid_signal_thread (cmdp, t);
+ }
+
+ /* Now the list with threads using user-allocated stacks. */
+ list_for_each (runp, &__stack_user)
+ {
+ struct pthread *t = list_entry (runp, struct pthread, list);
+- if (t != self)
+- {
+- int val;
+-#if __ASSUME_TGKILL
+- val = INTERNAL_SYSCALL (tgkill, err, 3,
+- THREAD_GETMEM (THREAD_SELF, pid),
+- t->tid, SIGSETXID);
+-#else
+-# ifdef __NR_tgkill
+- val = INTERNAL_SYSCALL (tgkill, err, 3,
+- THREAD_GETMEM (THREAD_SELF, pid),
+- t->tid, SIGSETXID);
+- if (INTERNAL_SYSCALL_ERROR_P (val, err)
+- && INTERNAL_SYSCALL_ERRNO (val, err) == ENOSYS)
+-# endif
+- val = INTERNAL_SYSCALL (tkill, err, 2, t->tid, SIGSETXID);
+-#endif
++ if (t == self)
++ continue;
+
+- if (!INTERNAL_SYSCALL_ERROR_P (val, err))
+- atomic_increment (&cmdp->cntr);
+- }
++ setxid_signal_thread (cmdp, t);
+ }
+
+ int cur = cmdp->cntr;
+@@ -895,6 +931,7 @@
+
+ /* This must be last, otherwise the current thread might not have
+ permissions to send SIGSETXID syscall to the other threads. */
++ INTERNAL_SYSCALL_DECL (err);
+ result = INTERNAL_SYSCALL_NCS (cmdp->syscall_no, err, 3,
+ cmdp->id[0], cmdp->id[1], cmdp->id[2]);
+ if (INTERNAL_SYSCALL_ERROR_P (result, err))
+@@ -920,7 +957,8 @@
+ # endif
+
+ /* Fill in the DTV slot so that a later LD/GD access will find it. */
+- dtv[map->l_tls_modid].pointer = dest;
++ dtv[map->l_tls_modid].pointer.val = dest;
++ dtv[map->l_tls_modid].pointer.is_static = true;
+
+ /* Initialize the memory. */
+ memset (__mempcpy (dest, map->l_tls_initimage, map->l_tls_initimage_size),
+diff -urN libc-2.3/nptl/ChangeLog libc-2.5/nptl/ChangeLog
+--- libc-2.3/nptl/ChangeLog 2006-11-12 17:27:57.870836000 +0100
++++ libc-2.5/nptl/ChangeLog 2006-11-12 22:49:12.235408500 +0100
+@@ -1,8 +1,1161 @@
++2006-09-24 Ulrich Drepper <drepper at redhat.com>
++
++ [BZ #3251]
++ * descr.h (ENQUEUE_MUTEX_BOTH): Add cast to avoid warning.
++ Patch by Petr Baudis.
++
++2006-09-18 Jakub Jelinek <jakub at redhat.com>
++
++ * tst-kill4.c (do_test): Explicitly set tf thread's stack size.
++
++ * tst-cancel2.c (tf): Loop as long as something was written.
++
++2006-09-12 Kaz Kojima <kkojima at rr.iij4u.or.jp>
++
++ * sysdeps/unix/sysv/linux/sh/pthread_cond_broadcast.S: For PI
++ mutexes wake all mutexes.
++ * sysdeps/unix/sysv/linux/sh/pthread_cond_wait.S: Don't increment
++ WAKEUP_SEQ if this would increase the value beyond TOTAL_SEQ.
++ * sysdeps/unix/sysv/linux/sh/pthread_cond_timedwait.S: Likewise.
++
++2006-09-12 Ulrich Drepper <drepper at redhat.com>
++
++ * tst-cond22.c (tf): Slight changes to the pthread_cond_wait use
++ to guarantee the thread is always canceled.
++
++2006-09-08 Jakub Jelinek <jakub at redhat.com>
++
++ * tst-cond22.c: Include pthread.h instead of pthreadP.h.
++ Include stdlib.h.
++ * sysdeps/pthread/pthread_cond_wait.c (__condvar_cleanup): Only
++ increase FUTEX if increasing WAKEUP_SEQ. Fix comment typo.
++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
++
++2006-09-08 Ulrich Drepper <drepper at redhat.com>
++
++ [BZ #3123]
++ * sysdeps/pthread/pthread_cond_wait.c (__condvar_cleanup): Don't
++ increment WAKEUP_SEQ if this would increase the value beyond TOTAL_SEQ.
++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_wait.S: Likewise.
++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_timedwait.S: Likewise.
++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_wait.S: Likewise.
++ * Makefile (tests): Add tst-cond22.
++ * tst-cond22.c: New file.
++
++2006-09-05 Ulrich Drepper <drepper at redhat.com>
++
++ [BZ #3124]
++ * descr.h (struct pthread): Add parent_cancelhandling.
++ * sysdeps/pthread/createthread.c (create_thread): Pass parent
++ cancelhandling value to child.
++ * pthread_create.c (start_thread): If parent thread was canceled
++ reset the SIGCANCEL mask.
++ * Makefile (tests): Add tst-cancel25.
++ * tst-cancel25.c: New file.
++
++2006-09-05 Jakub Jelinek <jakub at redhat.com>
++ Ulrich Drepper <drepper at redhat.com>
++
++ * sysdeps/pthread/gai_misc.h (GAI_MISC_NOTIFY): Don't decrement
++ counterp if it is already zero.
++ * sysdeps/pthread/aio_misc.h (AIO_MISC_NOTIFY): Likewise..
++
++2006-03-04 Jakub Jelinek <jakub at redhat.com>
++ Roland McGrath <roland at redhat.com>
++
++ * sysdeps/unix/sysv/linux/i386/lowlevellock.h
++ (LLL_STUB_UNWIND_INFO_START, LLL_STUB_UNWIND_INFO_END,
++ LLL_STUB_UNWIND_INFO_3, LLL_STUB_UNWIND_INFO_4): Define.
++ (lll_mutex_lock, lll_robust_mutex_lock, lll_mutex_cond_lock,
++ lll_robust_mutex_cond_lock, lll_mutex_timedlock,
++ lll_robust_mutex_timedlock, lll_mutex_unlock,
++ lll_robust_mutex_unlock, lll_lock, lll_unlock): Use them.
++ Add _L_*_ symbols around the subsection.
++ * sysdeps/unix/sysv/linux/i386/i486/lowlevellock.S: Add unwind info.
++ * sysdeps/unix/sysv/linux/i386/i486/lowlevelrobustlock.S: Likewise.
++
++2006-03-03 Jakub Jelinek <jakub at redhat.com>
++ Roland McGrath <roland at redhat.com>
++
++ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h
++ (LLL_STUB_UNWIND_INFO_START, LLL_STUB_UNWIND_INFO_END,
++ LLL_STUB_UNWIND_INFO_5, LLL_STUB_UNWIND_INFO_6): Define.
++ (lll_mutex_lock, lll_robust_mutex_lock, lll_mutex_cond_lock,
++ lll_robust_mutex_cond_lock, lll_mutex_timedlock,
++ lll_robust_mutex_timedlock, lll_mutex_unlock,
++ lll_robust_mutex_unlock, lll_lock, lll_unlock): Use them.
++ Add _L_*_ symbols around the subsection.
++ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.S: Add unwind info.
++ * sysdeps/unix/sysv/linux/x86_64/lowlevelrobustlock.S: Likewise.
++
++2006-08-31 Ulrich Drepper <drepper at redhat.com>
++
++ * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Undo last
++ change because it can disturb too much existing code. If real hard
++ reader preference is needed we'll introduce another type.
++ * sysdeps/pthread/pthread_rwlock_timedwrlock.c
++ (pthread_rwlock_timedwrlock): Likewise.
++ * sysdeps/pthread/pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock):
++ Likewise.
++
++2006-08-30 Ulrich Drepper <drepper at redhat.com>
++
++ * pthread_rwlock_trywrlock.c (__pthread_rwlock_trywrlock): Respect
++ reader preference.
++ * sysdeps/pthread/pthread_rwlock_timedwrlock.c
++ (pthread_rwlock_timedwrlock): Likewise.
++ * sysdeps/pthread/pthread_rwlock_wrlock.c (__pthread_rwlock_wrlock):
++ Likewise.
++
++2006-08-25 Jakub Jelinek <jakub at redhat.com>
++
++ * sysdeps/unix/sysv/linux/libc_pthread_init.c (freeres_libpthread):
++ Only define ifdef SHARED.
++
++2006-08-23 Ulrich Drepper <drepper at redhat.com>
++
++ * allocatestack.c (queue_stack): Move freeing of surplus stacks to...
++ (free_stacks): ...here.
++ (__free_stack_cache): New function.
++ * pthreadP.h: Declare __free_stack_cache.
++ * sysdeps/pthread/pthread-functions.h (pthread_functions): Add
++ ptr_freeres.
++ * init.c (pthread_functions): Initialize ptr_freeres.
++ * sysdeps/unix/sysv/linux/libc_pthread_init.c (freeres_libptread):
++ New freeres function.
++
++2006-07-30 Joseph S. Myers <joseph at codesourcery.com>
++
++ [BZ #3018]
++ * Makefile (extra-objs): Add modules to extra-test-objs instead.
++
++2006-08-20 Ulrich Drepper <drepper at redhat.com>
++
++ * sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
++ _XOPEN_REALTIME_THREADS.
++
++2006-08-15 Jakub Jelinek <jakub at redhat.com>
++
++ * sysdeps/unix/sysv/linux/clock_settime.c (INTERNAL_VSYSCALL): Use
++ HAVE_CLOCK_GETRES_VSYSCALL as guard macro rather than
++ HAVE_CLOCK_GETTIME_VSYSCALL.
++ (maybe_syscall_settime_cpu): Use plain INTERNAL_VSYSCALL here.
++
++2006-08-14 Jakub Jelinek <jakub at redhat.com>
++
++ * sysdeps/unix/sysv/linux/bits/posix_opt.h
++ (_POSIX_THREAD_PRIO_PROTECT): Define to 200112L.
++ * descr.h (struct priority_protection_data): New type.
++ (struct pthread): Add tpp field.
++ * pthreadP.h (PTHREAD_MUTEX_PP_NORMAL_NP,
++ PTHREAD_MUTEX_PP_RECURSIVE_NP, PTHREAD_MUTEX_PP_ERRORCHECK_NP,
++ PTHREAD_MUTEX_PP_ADAPTIVE_NP): New enum values.
++ * pthread_mutex_init.c (__pthread_mutex_init): Handle non-robust
++ TPP mutexes.
++ * pthread_mutex_lock.c (__pthread_mutex_lock): Handle TPP mutexes.
++ * pthread_mutex_trylock.c (__pthread_mutex_trylock): Likewise.
++ * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Likewise.
++ * pthread_mutex_unlock.c (__pthread_mutex_unlock_usercnt): Likewise.
++ * tpp.c: New file.
++ * pthread_setschedparam.c (__pthread_setschedparam): Handle priority
++ boosted by TPP.
++ * pthread_setschedprio.c (pthread_setschedprio): Likewise.
++ * pthread_mutexattr_getprioceiling.c
++ (pthread_mutexattr_getprioceiling): If ceiling is 0, ensure it is
++ in the SCHED_FIFO priority range.
++ * pthread_mutexattr_setprioceiling.c
++ (pthread_mutexattr_setprioceiling): Fix prioceiling validation.
++ * pthread_mutex_getprioceiling.c (pthread_mutex_getprioceiling): Fail
++ if mutex is not TPP. Ceiling is now in __data.__lock.
++ * pthread_mutex_setprioceiling.c: Include stdbool.h.
++ (pthread_mutex_setprioceiling): Fix prioceiling validation. Ceiling
++ is now in __data.__lock. Add locking.
++ * pthread_create.c (__free_tcb): Free pd->tpp structure.
++ * Makefile (libpthread-routines): Add tpp.
++ (xtests): Add tst-mutexpp1, tst-mutexpp6 and tst-mutexpp10.
++ * tst-tpp.h: New file.
++ * tst-mutexpp1.c: New file.
++ * tst-mutexpp6.c: New file.
++ * tst-mutexpp10.c: New file.
++ * tst-mutex1.c (TEST_FUNCTION): Don't redefine if already defined.
++ * tst-mutex6.c (TEST_FUNCTION): Likewise.
++
++2006-08-12 Ulrich Drepper <drepper at redhat.com>
++
++ [BZ #2843]
++ * pthread_join.c (pthread_join): Account for self being canceled
++ when checking for deadlocks.
++ * tst-join5.c: Cleanups. Allow to be used in tst-join6.
++ (tf1): Don't print anything after pthread_join returns, this would be
++ another cancellation point.
++ (tf2): Likewise.
++ * tst-join6.c: New file.
++ * Makefile (tests): Add tst-join6.
++
++2006-08-03 Ulrich Drepper <drepper at redhat.com>
++
++ [BZ #2892]
++ * pthread_setspecific.c (__pthread_setspecific): Check
++ out-of-range index before checking for unused key.
++
++ * sysdeps/pthread/gai_misc.h: New file.
++
++2006-08-01 Ulrich Drepper <drepper at redhat.com>
++
++ * sysdeps/unix/sysv/linux/i386/smp.h: New file. Old Linux-specific
++ file. Don't use sysctl.
++ * sysdeps/unix/sysv/linux/smp.h: Always assume SMP. Archs can
++ overwrite the file if this is likely not true.
++
++2006-07-31 Daniel Jacobowitz <dan at codesourcery.com>
++
++ * allocatestack.c (__reclaim_stacks): Reset the PID on cached stacks.
++ * Makefile (tests): Add tst-getpid3.
++ * tst-getpid3.c: New file.
++
++2006-07-30 Roland McGrath <roland at redhat.com>
++
++ * Makefile (libpthread-routines): Add ptw-sigsuspend.
++
++ * sysdeps/unix/sysv/linux/i386/not-cancel.h
++ (pause_not_cancel): New macro.
++ (nanosleep_not_cancel): New macro.
++ (sigsuspend_not_cancel): New macro.
++ * pthread_mutex_timedlock.c (pthread_mutex_timedlock): Use
++ nanosleep_not_cancel macro from <not-cancel.h>.
++ * pthread_mutex_lock.c (__pthread_mutex_lock): Use pause_not_cancel
++ macro from <not-cancel.h>.
++
++2006-07-28 Ulrich Drepper <drepper at redhat.com>
++ Jakub Jelinek <jakub at redhat.com>
++
++ * descr.h: Change ENQUEUE_MUTEX and DEQUEUE_MUTEX for bit 0
++ notification of PI mutex. Add ENQUEUE_MUTEX_PI.
++ * pthreadP.h: Define PTHREAD_MUTEX_PI_* macros for PI mutex types.
++ * pthread_mutex_setprioceilining.c: Adjust for mutex type name change.
++ * pthread_mutex_init.c: Add support for priority inheritance mutex.
++ * pthread_mutex_lock.c: Likewise.
++ * pthread_mutex_timedlock.c: Likewise.
++ * pthread_mutex_trylock.c: Likewise.
++ * pthread_mutex_unlock.c: Likewise.
++ * sysdeps/pthread/pthread_cond_broadcast.c: For PI mutexes wake
++ all mutexes.
++ * sysdeps/unix/sysv/linux/i386/i486/pthread_cond_broadcast.c: Likewise.
++ * sysdeps/unix/sysv/linux/x86_64/pthread_cond_broadcast.c: Likewise.
++ * sysdeps/unix/sysv/linux/pthread-pi-defines.sym: New file.
++ * sysdeps/unix/sysv/linux/Makefile (gen-as-const-header): Add
++ pthread-pi-defines.sym.
++ * sysdeps/unix/sysv/linux/i386/lowlevellock.h: Define FUTEX_LOCK_PI,
++ FUTEX_UNLOCK_PI, and FUTEX_TRYLOCK_PI.
++ * sysdeps/unix/sysv/linux/x86_64/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/alpha/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/ia64/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/powerpc/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/s390/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/sh/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/sparc/lowlevellock.h: Likewise.
++ * sysdeps/unix/sysv/linux/bits/posix_opt.h: Define
++ _POSIX_THREAD_PRIO_INHERIT to 200112L.
++ * tst-mutex1.c: Adjust to allow use in PI mutex test.
++ * tst-mutex2.c: Likewise.
++ * tst-mutex3.c: Likewise.
++ * tst-mutex4.c: Likewise.
++ * tst-mutex5.c: Likewise.
++ * tst-mutex6.c: Likewise.
++ * tst-mutex7.c: Likewise.
++ * tst-mutex7a.c: Likewise.
++ * tst-mutex8.c: Likewise.
++ * tst-mutex9.c: Likewise.
++ * tst-robust1.c: Likewise.
++ * tst-robust7.c: Likewise.
++ * tst-robust8.c: Likewise.
++ * tst-mutexpi1.c: New file.
++ * tst-mutexpi2.c: New file.
<<Diff was trimmed, longer than 597 lines>>
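For anyone skimming the trimmed diff, the allocatestack.c hunks near the top boil down to one refactoring: the loop that trims the thread-stack cache is pulled out of queue_stack() into a new helper free_stacks(limit), and a new __free_stack_cache() entry point (wired into libc's freeres machinery elsewhere in the patch) just calls free_stacks(0). The standalone sketch below models only that shape; struct cached_stack, the singly linked list, calloc()/free() and main() are stand-ins invented for illustration, while the real code uses glibc's list_t, FREE_P(), _dl_deallocate_tls() and munmap(), runs with the stack cache lock held, and walks the cache from its oldest entry.

/* Minimal sketch of the free_stacks()/__free_stack_cache() refactoring.
   Not glibc code; see the lead-in above for what is simplified away.  */
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>

struct cached_stack		/* stand-in for struct pthread + list_t */
{
  struct cached_stack *next;
  size_t size;
};

static struct cached_stack *stack_cache;
static size_t stack_cache_actsize;
static const size_t stack_cache_maxsize = 4 * 1024 * 1024;

/* Free cached stacks until the cache size is no larger than LIMIT.  */
static void
free_stacks (size_t limit)
{
  while (stack_cache != NULL && stack_cache_actsize > limit)
    {
      struct cached_stack *curr = stack_cache;
      stack_cache = curr->next;		   /* Unlink the block.  */
      stack_cache_actsize -= curr->size;   /* Account for the freed memory.  */
      free (curr);			   /* munmap() in the real code.  */
    }
}

/* Add a no-longer-used stack to the cache; trim if it grew too large.  */
static void
queue_stack (struct cached_stack *stack)
{
  stack->next = stack_cache;
  stack_cache = stack;
  stack_cache_actsize += stack->size;
  if (stack_cache_actsize > stack_cache_maxsize)
    free_stacks (stack_cache_maxsize);
}

/* Called indirectly from the freeres code in libc: drop everything.  */
void
__free_stack_cache (void)
{
  free_stacks (0);
}

int
main (void)
{
  for (int i = 0; i < 8; ++i)
    {
      struct cached_stack *s = calloc (1, sizeof *s);
      if (s == NULL)
	return 1;
      s->size = 1024 * 1024;
      queue_stack (s);
    }
  printf ("cached after queueing: %zu bytes\n", stack_cache_actsize);
  __free_stack_cache ();
  printf ("cached after freeres:  %zu bytes\n", stack_cache_actsize);
  return 0;
}

The point of the split is visible in the two call sites: queue_stack() keeps only the policy decision (trim when over stack_cache_maxsize), while the freeres path can reuse the same loop with a limit of zero to release every cached stack at process shutdown.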