Commit f2772a0e authored by Linus Torvalds

Merge tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml

Pull UML updates from Richard Weinberger:

 - A new timer mode, time travel, for testing with UML

 - Many bugfixes/improvements for the serial line driver

 - Various bugfixes

* tag 'for-linus-5.3-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/rw/uml:
  um: fix build without CONFIG_UML_TIME_TRAVEL_SUPPORT
  um: Fix kcov crash during startup
  um: configs: Remove useless UEVENT_HELPER_PATH
  um: Support time travel mode
  um: Pass nsecs to os timer functions
  um: Remove drivers/ssl.h
  um: Don't garbage collect in deactivate_all_fds()
  um: Silence lockdep complaint about mmap_sem
  um: Remove locking in deactivate_all_fds()
  um: Timer code cleanup
  um: fix os_timer_one_shot()
  um: Fix IRQ controller regression on console read
parents fcd98147 b482e48d
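
As a quick illustration (not part of the merge itself), the new modes added by this series are selected on the UML kernel command line, per the __uml_help text introduced below. The binary name, rootfs argument and epoch value here are placeholders:

	./linux ubd0=rootfs.img time-travel                                  # basic mode: clock skips forward while idle
	./linux ubd0=rootfs.img time-travel=inf-cpu                          # infinite-CPU simulation mode
	./linux ubd0=rootfs.img time-travel time-travel-start=1500000000     # also start the wall clock at a fixed epoch (seconds)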
@@ -184,6 +184,18 @@ config SECCOMP
 
 	  If unsure, say Y.
 
+config UML_TIME_TRAVEL_SUPPORT
+	bool
+	prompt "Support time-travel mode (e.g. for test execution)"
+	help
+	  Enable this option to support time travel inside the UML instance.
+
+	  After enabling this option, two modes are accessible at runtime
+	  (selected by the kernel command line), see the kernel's command-
+	  line help for more details.
+
+	  It is safe to say Y, but you probably don't need this.
+
 endmenu
 
 source "arch/um/drivers/Kconfig"
@@ -36,7 +36,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
......
@@ -34,7 +34,6 @@ CONFIG_XTERM_CHAN=y
 CONFIG_CON_CHAN="pts"
 CONFIG_SSL_CHAN="pts"
 CONFIG_UML_SOUND=m
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
 CONFIG_DEVTMPFS=y
 CONFIG_DEVTMPFS_MOUNT=y
 CONFIG_BLK_DEV_UBD=y
......
@@ -171,19 +171,55 @@ int enable_chan(struct line *line)
 	return err;
 }
 
+/* Items are added in IRQ context, when free_irq can't be called, and
+ * removed in process context, when it can.
+ * This handles interrupt sources which disappear, and which need to
+ * be permanently disabled.  This is discovered in IRQ context, but
+ * the freeing of the IRQ must be done later.
+ */
+static DEFINE_SPINLOCK(irqs_to_free_lock);
+static LIST_HEAD(irqs_to_free);
+
+void free_irqs(void)
+{
+	struct chan *chan;
+	LIST_HEAD(list);
+	struct list_head *ele;
+	unsigned long flags;
+
+	spin_lock_irqsave(&irqs_to_free_lock, flags);
+	list_splice_init(&irqs_to_free, &list);
+	spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+
+	list_for_each(ele, &list) {
+		chan = list_entry(ele, struct chan, free_list);
+
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
+}
+
 static void close_one_chan(struct chan *chan, int delay_free_irq)
 {
+	unsigned long flags;
+
 	if (!chan->opened)
 		return;
 
-	/* we can safely call free now - it will be marked
-	 * as free and freed once the IRQ stopped processing
-	 */
-	if (chan->input && chan->enabled)
-		um_free_irq(chan->line->driver->read_irq, chan);
-	if (chan->output && chan->enabled)
-		um_free_irq(chan->line->driver->write_irq, chan);
-	chan->enabled = 0;
+	if (delay_free_irq) {
+		spin_lock_irqsave(&irqs_to_free_lock, flags);
+		list_add(&chan->free_list, &irqs_to_free);
+		spin_unlock_irqrestore(&irqs_to_free_lock, flags);
+	} else {
+		if (chan->input && chan->enabled)
+			um_free_irq(chan->line->driver->read_irq, chan);
+		if (chan->output && chan->enabled)
+			um_free_irq(chan->line->driver->write_irq, chan);
+		chan->enabled = 0;
+	}
 	if (chan->ops->close != NULL)
 		(*chan->ops->close)(chan->fd, chan->data);
......
@@ -12,7 +12,6 @@
 #include <linux/console.h>
 #include <asm/termbits.h>
 #include <asm/irq.h>
-#include "ssl.h"
 #include "chan.h"
 #include <init.h>
 #include <irq_user.h>
......
-/*
- * Copyright (C) 2000 Jeff Dike (jdike@karaya.com)
- * Licensed under the GPL
- */
-
-#ifndef __SSL_H__
-#define __SSL_H__
-
-extern int ssl_read(int fd, int line);
-extern void ssl_receive_char(int line, char ch);
-
-#endif
@@ -52,7 +52,7 @@ static inline void activate_mm(struct mm_struct *old, struct mm_struct *new)
 	 * when the new ->mm is used for the first time.
 	 */
 	__switch_mm(&new->context.id);
-	down_write(&new->mmap_sem);
+	down_write_nested(&new->mmap_sem, 1);
 	uml_setup_stubs(new);
 	up_write(&new->mmap_sem);
 }
......
@@ -250,15 +250,13 @@ extern void os_warn(const char *fmt, ...)
 
 /* time.c */
 extern void os_idle_sleep(unsigned long long nsecs);
-extern int os_timer_create(void* timer);
-extern int os_timer_set_interval(void* timer, void* its);
-extern int os_timer_one_shot(int ticks);
-extern long long os_timer_disable(void);
-extern long os_timer_remain(void* timer);
+extern int os_timer_create(void);
+extern int os_timer_set_interval(unsigned long long nsecs);
+extern int os_timer_one_shot(unsigned long long nsecs);
+extern void os_timer_disable(void);
 extern void uml_idle_timer(void);
 extern long long os_persistent_clock_emulation(void);
 extern long long os_nsecs(void);
-extern long long os_vnsecs(void);
 
 /* skas/mem.c */
 extern long run_syscall_stub(struct mm_id * mm_idp,
......
@@ -10,4 +10,52 @@
 #define TIMER_MULTIPLIER 256
 #define TIMER_MIN_DELTA 500
 
+enum time_travel_mode {
+	TT_MODE_OFF,
+	TT_MODE_BASIC,
+	TT_MODE_INFCPU,
+};
+
+enum time_travel_timer_mode {
+	TT_TMR_DISABLED,
+	TT_TMR_ONESHOT,
+	TT_TMR_PERIODIC,
+};
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+extern enum time_travel_mode time_travel_mode;
+extern unsigned long long time_travel_time;
+extern enum time_travel_timer_mode time_travel_timer_mode;
+extern unsigned long long time_travel_timer_expiry;
+extern unsigned long long time_travel_timer_interval;
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+	time_travel_time = ns;
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+	time_travel_timer_mode = mode;
+	time_travel_timer_expiry = expiry;
+}
+#else
+#define time_travel_mode TT_MODE_OFF
+#define time_travel_time 0
+#define time_travel_timer_expiry 0
+#define time_travel_timer_interval 0
+
+static inline void time_travel_set_time(unsigned long long ns)
+{
+}
+
+static inline void time_travel_set_timer(enum time_travel_timer_mode mode,
+					 unsigned long long expiry)
+{
+}
+
+#define time_travel_timer_mode TT_TMR_DISABLED
+#endif
+
 #endif
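
When CONFIG_UML_TIME_TRAVEL_SUPPORT is disabled, the #else branch above turns time_travel_mode into the constant TT_MODE_OFF and the setters into empty inlines, so the callers added later in this series can test and update time-travel state without #ifdefs while the compiler drops the dead branches. A minimal sketch of the same compile-out pattern, using hypothetical names (CONFIG_DEMO_TT, demo_mode, demo_set_time are illustrative, not from the patch):

	enum demo_mode { DEMO_OFF, DEMO_ON };

	#ifdef CONFIG_DEMO_TT
	extern enum demo_mode demo_mode;
	extern unsigned long long demo_time;
	static inline void demo_set_time(unsigned long long ns)
	{
		demo_time = ns;		/* real bookkeeping only in this configuration */
	}
	#else
	#define demo_mode DEMO_OFF	/* constant: "if (demo_mode != DEMO_OFF)" compiles away */
	#define demo_time 0
	static inline void demo_set_time(unsigned long long ns)
	{
	}
	#endif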
@@ -21,6 +21,8 @@
 
 #include <irq_user.h>
 
+extern void free_irqs(void);
+
 /* When epoll triggers we do not know why it did so
  * we can also have different IRQs for read and write.
  * This is why we keep a small irq_fd array for each fd -
@@ -100,6 +102,8 @@ void sigio_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 			}
 		}
 	}
+
+	free_irqs();
 }
 
 static int assign_epoll_events_to_irq(struct irq_entry *irq_entry)
@@ -380,10 +384,8 @@ EXPORT_SYMBOL(deactivate_fd);
  */
 int deactivate_all_fds(void)
 {
-	unsigned long flags;
 	struct irq_entry *to_free;
 
-	spin_lock_irqsave(&irq_lock, flags);
 	/* Stop IO. The IRQ loop has no lock so this is our
 	 * only way of making sure we are safe to dispose
 	 * of all IRQ handlers
@@ -399,8 +401,7 @@ int deactivate_all_fds(void)
 		);
 		to_free = to_free->next;
 	}
-	garbage_collect_irq_entries();
-	spin_unlock_irqrestore(&irq_lock, flags);
+	/* don't garbage collect - we can no longer call kfree() here */
 	os_close_epoll_fd();
 	return 0;
 }
......
@@ -203,10 +203,50 @@ void initial_thread_cb(void (*proc)(void *), void *arg)
 	kmalloc_ok = save_kmalloc_ok;
 }
 
+static void time_travel_sleep(unsigned long long duration)
+{
+	unsigned long long next = time_travel_time + duration;
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
+	if (time_travel_timer_mode != TT_TMR_DISABLED ||
+	    time_travel_timer_expiry < next) {
+		if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			time_travel_set_timer(TT_TMR_DISABLED, 0);
+		/*
+		 * time_travel_time will be adjusted in the timer
+		 * IRQ handler so it works even when the signal
+		 * comes from the OS timer
+		 */
+		deliver_alarm();
+	} else {
+		time_travel_set_time(next);
+	}
+
+	if (time_travel_mode != TT_MODE_INFCPU) {
+		if (time_travel_timer_mode == TT_TMR_PERIODIC)
+			os_timer_set_interval(time_travel_timer_interval);
+		else if (time_travel_timer_mode == TT_TMR_ONESHOT)
+			os_timer_one_shot(time_travel_timer_expiry - next);
+	}
+}
+
+static void um_idle_sleep(void)
+{
+	unsigned long long duration = UM_NSEC_PER_SEC;
+
+	if (time_travel_mode != TT_MODE_OFF) {
+		time_travel_sleep(duration);
+	} else {
+		os_idle_sleep(duration);
+	}
+}
+
 void arch_cpu_idle(void)
 {
 	cpu_tasks[current_thread_info()->cpu].pid = os_getpid();
-	os_idle_sleep(UM_NSEC_PER_SEC);
+	um_idle_sleep();
 	local_irq_enable();
 }
......
@@ -12,4 +12,6 @@ obj-y := clone.o mmu.o process.o syscall.o uaccess.o
 CFLAGS_clone.o := $(CFLAGS_NO_HARDENING)
 UNPROFILE_OBJS := clone.o
 
+KCOV_INSTRUMENT := n
+
 include arch/um/scripts/Makefile.rules
@@ -10,12 +10,23 @@
 #include <sysdep/ptrace.h>
 #include <sysdep/ptrace_user.h>
 #include <sysdep/syscalls.h>
+#include <shared/timer-internal.h>
 
 void handle_syscall(struct uml_pt_regs *r)
 {
 	struct pt_regs *regs = container_of(r, struct pt_regs, regs);
 	int syscall;
 
+	/*
+	 * If we have infinite CPU resources, then make every syscall also a
+	 * preemption point, since we don't have any other preemption in this
+	 * case, and kernel threads would basically never run until userspace
+	 * went to sleep, even if said userspace interacts with the kernel in
+	 * various ways.
+	 */
+	if (time_travel_mode == TT_MODE_INFCPU)
+		schedule();
+
 	/* Initialize the syscall number and default return value. */
 	UPT_SYSCALL_NR(r) = PT_SYSCALL_NR(r->gp);
 	PT_REGS_SET_SYSCALL_RETURN(regs, -ENOSYS);
......
@@ -19,11 +19,29 @@
 #include <kern_util.h>
 #include <os.h>
 #include <timer-internal.h>
+#include <shared/init.h>
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+enum time_travel_mode time_travel_mode;
+unsigned long long time_travel_time;
+enum time_travel_timer_mode time_travel_timer_mode;
+unsigned long long time_travel_timer_expiry;
+unsigned long long time_travel_timer_interval;
+
+static bool time_travel_start_set;
+static unsigned long long time_travel_start;
+#else
+#define time_travel_start_set 0
+#define time_travel_start 0
+#endif
 
 void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 {
 	unsigned long flags;
 
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_time(time_travel_timer_expiry);
+
 	local_irq_save(flags);
 	do_IRQ(TIMER_IRQ, regs);
 	local_irq_restore(flags);
@@ -31,26 +49,47 @@ void timer_handler(int sig, struct siginfo *unused_si, struct uml_pt_regs *regs)
 
 static int itimer_shutdown(struct clock_event_device *evt)
 {
-	os_timer_disable();
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_DISABLED, 0);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_disable();
+
 	return 0;
 }
 
 static int itimer_set_periodic(struct clock_event_device *evt)
 {
-	os_timer_set_interval(NULL, NULL);
+	unsigned long long interval = NSEC_PER_SEC / HZ;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_PERIODIC,
+				      time_travel_time + interval);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		os_timer_set_interval(interval);
+
 	return 0;
 }
 
 static int itimer_next_event(unsigned long delta,
 			     struct clock_event_device *evt)
 {
-	return os_timer_one_shot(delta);
+	delta += 1;
+
+	if (time_travel_mode != TT_MODE_OFF)
+		time_travel_set_timer(TT_TMR_ONESHOT,
+				      time_travel_time + delta);
+
+	if (time_travel_mode != TT_MODE_INFCPU)
+		return os_timer_one_shot(delta);
+
+	return 0;
 }
 
 static int itimer_one_shot(struct clock_event_device *evt)
 {
-	os_timer_one_shot(1);
-	return 0;
+	return itimer_next_event(0, evt);
 }
 
 static struct clock_event_device timer_clockevent = {
@@ -87,6 +126,17 @@ static irqreturn_t um_timer(int irq, void *dev)
 
 static u64 timer_read(struct clocksource *cs)
 {
+	if (time_travel_mode != TT_MODE_OFF) {
+		/*
+		 * We make reading the timer cost a bit so that we don't get
+		 * stuck in loops that expect time to move more than the
+		 * exact requested sleep amount, e.g. python's socket server,
+		 * see https://bugs.python.org/issue37026.
+		 */
+		time_travel_set_time(time_travel_time + TIMER_MULTIPLIER);
+		return time_travel_time / TIMER_MULTIPLIER;
+	}
+
 	return os_nsecs() / TIMER_MULTIPLIER;
 }
@@ -107,7 +157,7 @@ static void __init um_timer_setup(void)
 		printk(KERN_ERR "register_timer : request_irq failed - "
 		       "errno = %d\n", -err);
 
-	err = os_timer_create(NULL);
+	err = os_timer_create();
 	if (err != 0) {
 		printk(KERN_ERR "creation of timer failed - errno = %d\n", -err);
 		return;
@@ -123,7 +173,12 @@ static void __init um_timer_setup(void)
 
 void read_persistent_clock64(struct timespec64 *ts)
 {
-	long long nsecs = os_persistent_clock_emulation();
+	long long nsecs;
+
+	if (time_travel_start_set)
+		nsecs = time_travel_start + time_travel_time;
+	else
+		nsecs = os_persistent_clock_emulation();
 
 	set_normalized_timespec64(ts, nsecs / NSEC_PER_SEC,
 				  nsecs % NSEC_PER_SEC);
@@ -134,3 +189,65 @@ void __init time_init(void)
 	timer_set_signal_handler();
 	late_time_init = um_timer_setup;
 }
+
+#ifdef CONFIG_UML_TIME_TRAVEL_SUPPORT
+unsigned long calibrate_delay_is_known(void)
+{
+	if (time_travel_mode == TT_MODE_INFCPU)
+		return 1;
+	return 0;
+}
+
+int setup_time_travel(char *str)
+{
+	if (strcmp(str, "=inf-cpu") == 0) {
+		time_travel_mode = TT_MODE_INFCPU;
+		timer_clockevent.name = "time-travel-timer-infcpu";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	if (!*str) {
+		time_travel_mode = TT_MODE_BASIC;
+		timer_clockevent.name = "time-travel-timer";
+		timer_clocksource.name = "time-travel-clock";
+		return 1;
+	}
+
+	return -EINVAL;
+}
+
+__setup("time-travel", setup_time_travel);
+__uml_help(setup_time_travel,
+"time-travel\n"
+"This option just enables basic time travel mode, in which the clock/timers\n"
+"inside the UML instance skip forward when there's nothing to do, rather than\n"
+"waiting for real time to elapse. However, instance CPU speed is limited by\n"
+"the real CPU speed, so e.g. a 10ms timer will always fire after ~10ms wall\n"
+"clock (but quicker when there's nothing to do).\n"
+"\n"
+"time-travel=inf-cpu\n"
+"This enables time travel mode with infinite processing power, in which there\n"
+"are no wall clock timers, and any CPU processing happens - as seen from the\n"
+"guest - instantly. This can be useful for accurate simulation regardless of\n"
+"debug overhead, physical CPU speed, etc. but is somewhat dangerous as it can\n"
+"easily lead to getting stuck (e.g. if anything in the system busy loops).\n");
+
+int setup_time_travel_start(char *str)
+{
+	int err;
+
+	err = kstrtoull(str, 0, &time_travel_start);
+	if (err)
+		return err;
+
+	time_travel_start_set = 1;
+	return 1;
+}
+
+__setup("time-travel-start", setup_time_travel_start);
+__uml_help(setup_time_travel_start,
+"time-travel-start=<seconds>\n"
+"Configure the UML instance's wall clock to start at this value rather than\n"
+"the host's wall clock at the time of UML boot.\n");
+#endif
@@ -26,11 +26,11 @@ static inline long long timeval_to_ns(const struct timeval *tv)
 static inline long long timespec_to_ns(const struct timespec *ts)
 {
-	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) +
-		ts->tv_nsec;
+	return ((long long) ts->tv_sec * UM_NSEC_PER_SEC) + ts->tv_nsec;
 }
 
-long long os_persistent_clock_emulation (void) {
+long long os_persistent_clock_emulation(void)
+{
 	struct timespec realtime_tp;
 
 	clock_gettime(CLOCK_REALTIME, &realtime_tp);
@@ -40,94 +40,41 @@ long long os_persistent_clock_emulation(void)
 /**
  * os_timer_create() - create an new posix (interval) timer
  */
-int os_timer_create(void* timer) {
-
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
+int os_timer_create(void)
+{
+	timer_t *t = &event_high_res_timer;
 
-	if (timer_create(
-		CLOCK_MONOTONIC,
-		NULL,
-		t) == -1) {
+	if (timer_create(CLOCK_MONOTONIC, NULL, t) == -1)
 		return -1;
-	}
+
 	return 0;
 }
 
-int os_timer_set_interval(void* timer, void* i)
+int os_timer_set_interval(unsigned long long nsecs)
 {
 	struct itimerspec its;
-	unsigned long long nsec;
-	timer_t* t = timer;
-	struct itimerspec* its_in = i;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
 
-	nsec = UM_NSEC_PER_SEC / UM_HZ;
+	its.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	if(its_in != NULL) {
-		its.it_value.tv_sec = its_in->it_value.tv_sec;
-		its.it_value.tv_nsec = its_in->it_value.tv_nsec;
-	} else {
-		its.it_value.tv_sec = 0;
-		its.it_value.tv_nsec = nsec;
-	}
+	its.it_interval.tv_sec = nsecs / UM_NSEC_PER_SEC;
+	its.it_interval.tv_nsec = nsecs % UM_NSEC_PER_SEC;
 
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = nsec;
-
-	if(timer_settime(*t, 0, &its, NULL) == -1) {
+	if (timer_settime(event_high_res_timer, 0, &its, NULL) == -1)
 		return -errno;
-	}
 
 	return 0;
 }
 
-/**
- * os_timer_remain() - returns the remaining nano seconds of the given interval
- *                     timer
- * Because this is the remaining time of an interval timer, which correspondends
- * to HZ, this value can never be bigger than one second. Just
- * the nanosecond part of the timer is returned.
- * The returned time is relative to the start time of the interval timer.
- * Return an negative value in an error case.
- */
-long os_timer_remain(void* timer)
+int os_timer_one_shot(unsigned long long nsecs)
 {
-	struct itimerspec its;
-	timer_t* t = timer;
-
-	if(t == NULL) {
-		t = &event_high_res_timer;
-	}
-
-	if(timer_gettime(t, &its) == -1) {
-		return -errno;
-	}
-
-	return its.it_value.tv_nsec;
-}
-
-int os_timer_one_shot(int ticks)
-{
-	struct itimerspec its;
-	unsigned long long nsec;
-	unsigned long sec;
-
-	nsec = (ticks + 1);
-	sec = nsec / UM_NSEC_PER_SEC;
-	nsec = nsec % UM_NSEC_PER_SEC;
-
-	its.it_value.tv_sec = nsec / UM_NSEC_PER_SEC;
-	its.it_value.tv_nsec = nsec;
-
-	its.it_interval.tv_sec = 0;
-	its.it_interval.tv_nsec = 0; // we cheat here
+	struct itimerspec its = {
+		.it_value.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.it_value.tv_nsec = nsecs % UM_NSEC_PER_SEC,
+
+		.it_interval.tv_sec = 0,
+		.it_interval.tv_nsec = 0, // we cheat here
+	};
 
 	timer_settime(event_high_res_timer, 0, &its, NULL);
 	return 0;
@@ -135,24 +82,13 @@ int os_timer_one_shot(int ticks)
 
 /**
  * os_timer_disable() - disable the posix (interval) timer
- * Returns the remaining interval timer time in nanoseconds
  */
-long long os_timer_disable(void)
+void os_timer_disable(void)
 {
 	struct itimerspec its;
 
 	memset(&its, 0, sizeof(struct itimerspec));
-	timer_settime(event_high_res_timer, 0, &its, &its);
-
-	return its.it_value.tv_sec * UM_NSEC_PER_SEC + its.it_value.tv_nsec;
-}
-
-long long os_vnsecs(void)
-{
-	struct timespec ts;
-
-	clock_gettime(CLOCK_PROCESS_CPUTIME_ID,&ts);
-	return timespec_to_ns(&ts);
+	timer_settime(event_high_res_timer, 0, &its, NULL);
 }
 
 long long os_nsecs(void)
@@ -169,21 +105,14 @@ long long os_nsecs(void)
  */
 void os_idle_sleep(unsigned long long nsecs)
 {
-	struct timespec ts;
-
-	if (nsecs <= 0) {
-		return;
-	}
-
-	ts = ((struct timespec) {
-		.tv_sec = nsecs / UM_NSEC_PER_SEC,
-		.tv_nsec = nsecs % UM_NSEC_PER_SEC
-	});
+	struct timespec ts = {
+		.tv_sec = nsecs / UM_NSEC_PER_SEC,
+		.tv_nsec = nsecs % UM_NSEC_PER_SEC
+	};
 
 	/*
 	 * Relay the signal if clock_nanosleep is interrupted.
 	 */
-	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL)) {
+	if (clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL))
 		deliver_alarm();
-	}
 }
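
With this rework the os_timer_* helpers take plain nanosecond values and always act on the single event_high_res_timer, so callers no longer build a struct itimerspec themselves. A minimal usage sketch against the declarations in os.h (error handling trimmed; the 2-second value is arbitrary, not from the patches):

	/* sketch only: create the host timer, then arm it */
	if (os_timer_create() != 0)
		printk(KERN_ERR "creation of timer failed\n");

	os_timer_set_interval(UM_NSEC_PER_SEC / UM_HZ);	/* periodic HZ tick */
	os_timer_one_shot(2ULL * UM_NSEC_PER_SEC);	/* or a single expiry ~2s out */
	os_timer_disable();				/* stop it again */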