mirror of git://sourceware.org/git/glibc.git
commit 4a39c34c4f
Since gettimeofday will shortly be implemented in terms of clock_gettime on all platforms, internal code should use clock_gettime directly; in addition to removing a layer of indirection, this will allow us to remove the PLT-bypass gunk for gettimeofday. (We can't quite do that yet, but it'll be coming later in this patch series.) In many cases, the changed code does fewer conversions.

The changed code always assumes __clock_gettime (CLOCK_REALTIME) cannot fail. Most of the call sites were assuming gettimeofday could not fail, but a few places were checking for errors. POSIX says clock_gettime can only fail if the clock constant is invalid or unsupported, and CLOCK_REALTIME is the one and only clock constant that's required to be supported. For consistency I grepped the entire source tree for any other places that checked for errors from __clock_gettime (CLOCK_REALTIME), found one, and changed it too. (For the record, POSIX also says gettimeofday can never fail.)

(It would be nice if we could declare that GNU systems will always support CLOCK_MONOTONIC as well as CLOCK_REALTIME; there are several places where we are using CLOCK_REALTIME where _MONOTONIC would be more appropriate, and/or trying to use _MONOTONIC and then falling back to _REALTIME. But the Hurd doesn't support CLOCK_MONOTONIC yet, and it looks like adding it would involve substantial changes to gnumach's internals and API. Oh well.)

A few Hurd-specific files were changed to use __host_get_time instead of __clock_gettime, as this seemed tidier. We also assume this cannot fail. Skimming the code in gnumach leads me to believe the only way it could fail is if __mach_host_self also failed, and our Hurd-specific code consistently assumes that can't happen, so I'm going with that.

With the exception of support/support_test_main.c, test cases are not modified, mainly because I didn't want to have to figure out which test cases were testing gettimeofday specifically.

The definition of GETTIME in sysdeps/generic/memusage.h had a typo and was not reading tv_sec at all. I fixed this. It appears nobody has been generating malloc traces on a machine that doesn't have a superseding definition.

There are a whole bunch of places where the code could be simplified by factoring out timespec subtraction and/or comparison logic, but I want to keep this patch as mechanical as possible.

Checked on x86_64-linux-gnu, i686-linux-gnu, powerpc64le-linux-gnu, powerpc64-linux-gnu, powerpc-linux-gnu, and aarch64-linux-gnu.

Reviewed-by: Adhemerval Zanella <adhemerval.zanella@linaro.org>
Reviewed-by: Lukasz Majewski <lukma@denx.de>
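
The conversion at a typical call site is mechanical: a struct timeval filled in via gettimeofday becomes a struct timespec filled in directly by clock_gettime. A minimal sketch of the before/after pattern, using illustrative helper names that do not appear in the patch:

    #include <sys/time.h>
    #include <time.h>

    /* Before: fetch the time with gettimeofday and widen microseconds
       to nanoseconds.  The error return was generally ignored.  */
    static void
    current_time_old (struct timespec *ts)
    {
      struct timeval tv;
      (void) gettimeofday (&tv, NULL);
      ts->tv_sec = tv.tv_sec;
      ts->tv_nsec = tv.tv_usec * 1000;
    }

    /* After: read CLOCK_REALTIME directly.  POSIX requires this clock
       to be supported, so the result is not checked.  (Inside glibc
       the call is spelled __clock_gettime to bypass the PLT.)  */
    static void
    current_time_new (struct timespec *ts)
    {
      (void) clock_gettime (CLOCK_REALTIME, ts);
    }

The memusage.h fix follows the same shape. An illustrative reconstruction of the bug class (not quoted from the tree) is a microsecond computation in which tv_usec appears in both terms, so tv_sec is never read:

    #include <stdint.h>
    #include <sys/time.h>
    #include <time.h>

    /* Buggy: the seconds field is never folded in.  */
    static uint64_t
    usec_stamp_buggy (const struct timeval *tv)
    {
      return (uint64_t) tv->tv_usec + 1000000 * (uint64_t) tv->tv_usec;
    }

    /* Fixed, after the conversion to struct timespec.  */
    static uint64_t
    usec_stamp_fixed (const struct timespec *ts)
    {
      return (uint64_t) ts->tv_nsec / 1000 + 1000000 * (uint64_t) ts->tv_sec;
    }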
117 lines
3.4 KiB
C
/* Computing deadlines for timeouts.
   Copyright (C) 2017-2019 Free Software Foundation, Inc.
   This file is part of the GNU C Library.

   The GNU C Library is free software; you can redistribute it and/or
   modify it under the terms of the GNU Lesser General Public
   License as published by the Free Software Foundation; either
   version 2.1 of the License, or (at your option) any later version.

   The GNU C Library is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   Lesser General Public License for more details.

   You should have received a copy of the GNU Lesser General Public
   License along with the GNU C Library; if not, see
   <https://www.gnu.org/licenses/>.  */
#include <net-internal.h>

#include <assert.h>
#include <limits.h>
#include <stdio.h>
#include <stdint.h>
#include <time.h>

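/* Return the current time, using CLOCK_MONOTONIC if available and
   falling back to CLOCK_REALTIME otherwise.  */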
struct deadline_current_time
__deadline_current_time (void)
{
  struct deadline_current_time result;
  if (__clock_gettime (CLOCK_MONOTONIC, &result.current) != 0)
    __clock_gettime (CLOCK_REALTIME, &result.current);
  assert (result.current.tv_sec >= 0);
  return result;
}

/* A special deadline value for which __deadline_is_infinite is
   true.  */
static inline struct deadline
infinite_deadline (void)
{
  return (struct deadline) { { -1, -1 } };
}

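/* Compute the absolute deadline CURRENT + TV, returning the infinite
   deadline if the addition overflows.  */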
struct deadline
__deadline_from_timeval (struct deadline_current_time current,
                         struct timeval tv)
{
  assert (__is_timeval_valid_timeout (tv));

  /* Compute second-based deadline.  Perform the addition in
     uintmax_t, which is unsigned, to simplify overflow detection.  */
  uintmax_t sec = current.current.tv_sec;
  sec += tv.tv_sec;
  if (sec < (uintmax_t) tv.tv_sec)
    return infinite_deadline ();

  /* Compute nanosecond deadline.  */
  int nsec = current.current.tv_nsec + tv.tv_usec * 1000;
  if (nsec >= 1000 * 1000 * 1000)
    {
      /* Carry nanosecond overflow to seconds.  */
      nsec -= 1000 * 1000 * 1000;
      if (sec + 1 < sec)
        return infinite_deadline ();
      ++sec;
    }
  /* This uses a GCC extension, otherwise these casts for detecting
     overflow would not be defined.  */
  if ((time_t) sec < 0 || sec != (uintmax_t) (time_t) sec)
    return infinite_deadline ();

  return (struct deadline) { { sec, nsec } };
}

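/* Return the time remaining until DEADLINE in milliseconds, rounded
   up and clamped to the range [0, INT_MAX].  */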
int
__deadline_to_ms (struct deadline_current_time current,
                  struct deadline deadline)
{
  if (__deadline_is_infinite (deadline))
    return INT_MAX;

  if (current.current.tv_sec > deadline.absolute.tv_sec
      || (current.current.tv_sec == deadline.absolute.tv_sec
          && current.current.tv_nsec >= deadline.absolute.tv_nsec))
    return 0;
  time_t sec = deadline.absolute.tv_sec - current.current.tv_sec;
  if (sec >= INT_MAX)
    /* This value will overflow below.  */
    return INT_MAX;
  int nsec = deadline.absolute.tv_nsec - current.current.tv_nsec;
  if (nsec < 0)
    {
      /* Borrow from the seconds field.  */
      assert (sec > 0);
      --sec;
      nsec += 1000 * 1000 * 1000;
    }

  /* Prepare for rounding up to milliseconds.  */
  nsec += 999999;
  if (nsec > 1000 * 1000 * 1000)
    {
      assert (sec < INT_MAX);
      ++sec;
      nsec -= 1000 * 1000 * 1000;
    }

  unsigned int msec = nsec / (1000 * 1000);
  if (sec > INT_MAX / 1000)
    return INT_MAX;
  msec += sec * 1000;
  if (msec > INT_MAX)
    return INT_MAX;
  return msec;
}