Commit 1943689c authored by Jeremy Fitzhardinge's avatar Jeremy Fitzhardinge

Merge branches 'for-linus/xen/dev-evtchn', 'for-linus/xen/xenbus',...

Merge branches 'for-linus/xen/dev-evtchn', 'for-linus/xen/xenbus', 'for-linus/xen/xenfs' and 'for-linus/xen/sys-hypervisor' into for-linus/xen/master

* for-linus/xen/dev-evtchn:
  xen/dev-evtchn: clean up locking in evtchn
  xen: export ioctl headers to userspace
  xen: add /dev/xen/evtchn driver
  xen: add irq_from_evtchn

* for-linus/xen/xenbus:
  xen/xenbus: export xenbus_dev_changed
  xen: use device model for suspending xenbus devices
  xen: remove suspend_cancel hook

* for-linus/xen/xenfs:
  xen: add "capabilities" file

* for-linus/xen/sys-hypervisor:
  xen: drop kexec bits from /sys/hypervisor since kexec isn't implemented yet
  xen/sys/hypervisor: change writable_pt to features
  xen: add /sys/hypervisor support

Conflicts:
	drivers/xen/Makefile
......@@ -18,6 +18,16 @@ config XEN_SCRUB_PAGES
secure, but slightly less efficient.
If in doubt, say yes.
config XEN_DEV_EVTCHN
tristate "Xen /dev/xen/evtchn device"
depends on XEN
default y
help
The evtchn driver allows a userspace process to trigger event
channels and to receive notification of an event channel
firing.
If in doubt, say yes.
config XENFS
tristate "Xen filesystem"
depends on XEN
......@@ -41,3 +51,13 @@ config XEN_COMPAT_XENFS
a xen platform.
If in doubt, say yes.
config XEN_SYS_HYPERVISOR
bool "Create xen entries under /sys/hypervisor"
depends on XEN && SYSFS
select SYS_HYPERVISOR
default y
help
Create entries under /sys/hypervisor describing the Xen
hypervisor environment. When running native or in another
virtual environment, /sys/hypervisor will still be present,
but will have no xen contents.
\ No newline at end of file
......@@ -4,4 +4,6 @@ obj-y += xenbus/
obj-$(CONFIG_HOTPLUG_CPU) += cpu_hotplug.o
obj-$(CONFIG_XEN_XENCOMM) += xencomm.o
obj-$(CONFIG_XEN_BALLOON) += balloon.o
obj-$(CONFIG_XENFS) += xenfs/
\ No newline at end of file
obj-$(CONFIG_XEN_DEV_EVTCHN) += evtchn.o
obj-$(CONFIG_XENFS) += xenfs/
obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
\ No newline at end of file
......@@ -151,6 +151,12 @@ static unsigned int evtchn_from_irq(unsigned irq)
return info_for_irq(irq)->evtchn;
}
/*
 * irq_from_evtchn - look up the Linux IRQ bound to an event channel.
 * @evtchn: event channel number; the caller must ensure it is a valid
 *          index into evtchn_to_irq (< NR_EVENT_CHANNELS).
 *
 * NOTE(review): assumed to yield -1 (as unsigned) for unbound channels,
 * based on how evtchn_to_irq appears to be used elsewhere — the table's
 * initialisation is not visible here; confirm.
 */
unsigned irq_from_evtchn(unsigned int evtchn)
{
	return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);
static enum ipi_vector ipi_from_irq(unsigned irq)
{
struct irq_info *info = info_for_irq(irq);
......
/******************************************************************************
* evtchn.c
*
* Driver for receiving and demuxing event-channel signals.
*
* Copyright (c) 2004-2005, K A Fraser
* Multi-process extensions Copyright (c) 2004, Steven Smith
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/miscdevice.h>
#include <linux/major.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/poll.h>
#include <linux/irq.h>
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/mutex.h>
#include <linux/cpu.h>
#include <xen/events.h>
#include <xen/evtchn.h>
#include <asm/xen/hypervisor.h>
/* Per-open-file state for /dev/xen/evtchn. */
struct per_user_data {
	struct mutex bind_mutex; /* serialize bind/unbind operations */

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
	evtchn_port_t *ring;	/* one page, allocated in evtchn_open() */
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct mutex ring_cons_mutex; /* protect against concurrent readers */

	/* Processes wait on this queue when ring is empty. */
	wait_queue_head_t evtchn_wait;
	struct fasync_struct *evtchn_async_queue;
	const char *name;	/* "evtchn:<comm>"; used as the IRQ name */
};

/* Who's bound to each port? */
static struct per_user_data *port_user[NR_EVENT_CHANNELS];
static DEFINE_SPINLOCK(port_user_lock); /* protects port_user[] and ring_prod */
/*
 * Interrupt handler for a user-bound event channel: queue the port on the
 * owner's notification ring and wake any readers.
 *
 * The IRQ is disabled (nosync) on every event and re-enabled from
 * evtchn_write() once userspace has consumed it. port_user_lock guards
 * port_user[] and ring_prod against concurrent bind/unbind and other
 * handlers.
 */
irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = port_user[port];

	disable_irq_nosync(irq);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			/* Ring was empty: readers may be asleep. */
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else {
		/* Ring full: latch the error, reported to readers as -EFBIG. */
		u->ring_overflow = 1;
	}

	spin_unlock(&port_user_lock);

	return IRQ_HANDLED;
}
/*
 * Read pending event-channel ports from the notification ring.
 *
 * Always returns a whole number of evtchn_port_t values. Returns
 * -EAGAIN for a non-blocking read of an empty ring, -EFBIG once the
 * ring has overflowed (cleared only via IOCTL_EVTCHN_RESET), or
 * -EFAULT on a bad user buffer.
 */
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		/* ring_cons_mutex serialises concurrent readers. */
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;	/* data available; leave loop holding the mutex */

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}
/*
 * Writing port numbers re-enables delivery of those event channels after
 * userspace has handled them (evtchn_interrupt() disables the IRQ on each
 * event). Ports not bound to this file descriptor are silently skipped.
 * Returns the number of bytes consumed (rounded to whole ports).
 */
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, i;
	evtchn_port_t *kbuf = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (kbuf == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(kbuf, buf, count) != 0)
		goto out;

	/* Exclude bind/unbind while checking port ownership. */
	spin_lock_irq(&port_user_lock);
	for (i = 0; i < (count/sizeof(evtchn_port_t)); i++)
		if ((kbuf[i] < NR_EVENT_CHANNELS) && (port_user[kbuf[i]] == u))
			enable_irq(irq_from_evtchn(kbuf[i]));
	spin_unlock_irq(&port_user_lock);

	rc = count;

 out:
	free_page((unsigned long)kbuf);
	return rc;
}
/*
 * Make @u the owner of @port and install the interrupt handler.
 *
 * Returns 0 on success or a negative errno from
 * bind_evtchn_to_irqhandler(). On failure, ownership of the port is
 * dropped again so that evtchn_release()/IOCTL_EVTCHN_UNBIND never try
 * to unbind a port whose handler was never registered.
 */
static int evtchn_bind_to_user(struct per_user_data *u, int port)
{
	int rc = 0;

	/*
	 * Ports are never reused, so every caller should pass in a
	 * unique port.
	 *
	 * (Locking not necessary because we haven't registered the
	 * interrupt handler yet, and our caller has already
	 * serialized bind operations.)
	 */
	BUG_ON(port_user[port] != NULL);
	port_user[port] = u;

	rc = bind_evtchn_to_irqhandler(port, evtchn_interrupt, IRQF_DISABLED,
				       u->name, (void *)(unsigned long)port);
	if (rc >= 0)
		rc = 0;	/* success returns the irq number; callers expect 0 */
	else
		port_user[port] = NULL;	/* bind failed: release ownership */

	return rc;
}
/*
 * Tear down the IRQ handler for @port, then clear its ownership.
 * Callers (evtchn_ioctl(), evtchn_release()) hold port_user_lock.
 */
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	/* make sure we unbind the irq handler before clearing the port */
	barrier();

	port_user[port] = NULL;
}
/*
 * ioctl interface: bind/unbind event channels to this file descriptor and
 * send notifications. bind_mutex serialises bind against unbind so the
 * "ports are never reused" assumption in evtchn_bind_to_user() holds.
 */
static long evtchn_ioctl(struct file *file,
			 unsigned int cmd, unsigned long arg)
{
	int rc;
	struct per_user_data *u = file->private_data;
	void __user *uarg = (void __user *) arg;

	/* Prevent bind from racing with unbind */
	mutex_lock(&u->bind_mutex);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		/* Bind a virtual IRQ (on vcpu 0); return the new port. */
		struct ioctl_evtchn_bind_virq bind;
		struct evtchn_bind_virq bind_virq;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_virq.virq = bind.virq;
		bind_virq.vcpu = 0;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						 &bind_virq);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_virq.port);
		if (rc == 0)
			rc = bind_virq.port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		/* Bind to a remote domain's port; return the local port. */
		struct ioctl_evtchn_bind_interdomain bind;
		struct evtchn_bind_interdomain bind_interdomain;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		bind_interdomain.remote_dom = bind.remote_domain;
		bind_interdomain.remote_port = bind.remote_port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
						 &bind_interdomain);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, bind_interdomain.local_port);
		if (rc == 0)
			rc = bind_interdomain.local_port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		/* Allocate an unbound port for @remote_domain to bind to. */
		struct ioctl_evtchn_bind_unbound_port bind;
		struct evtchn_alloc_unbound alloc_unbound;

		rc = -EFAULT;
		if (copy_from_user(&bind, uarg, sizeof(bind)))
			break;

		alloc_unbound.dom = DOMID_SELF;
		alloc_unbound.remote_dom = bind.remote_domain;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound,
						 &alloc_unbound);
		if (rc != 0)
			break;

		rc = evtchn_bind_to_user(u, alloc_unbound.port);
		if (rc == 0)
			rc = alloc_unbound.port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind unbind;

		rc = -EFAULT;
		if (copy_from_user(&unbind, uarg, sizeof(unbind)))
			break;

		rc = -EINVAL;
		if (unbind.port >= NR_EVENT_CHANNELS)
			break;

		/* port_user_lock also excludes the interrupt handler. */
		spin_lock_irq(&port_user_lock);

		rc = -ENOTCONN;
		if (port_user[unbind.port] != u) {
			spin_unlock_irq(&port_user_lock);
			break;
		}

		evtchn_unbind_from_user(u, unbind.port);

		spin_unlock_irq(&port_user_lock);

		rc = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		/* Kick the remote end of a port this fd owns. */
		struct ioctl_evtchn_notify notify;

		rc = -EFAULT;
		if (copy_from_user(&notify, uarg, sizeof(notify)))
			break;

		if (notify.port >= NR_EVENT_CHANNELS) {
			rc = -EINVAL;
		} else if (port_user[notify.port] != u) {
			rc = -ENOTCONN;
		} else {
			notify_remote_via_evtchn(notify.port);
			rc = 0;
		}
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialise the ring to empty. Clear errors. */
		mutex_lock(&u->ring_cons_mutex);
		spin_lock_irq(&port_user_lock);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		spin_unlock_irq(&port_user_lock);
		mutex_unlock(&u->ring_cons_mutex);
		rc = 0;
		break;
	}

	default:
		rc = -ENOSYS;
		break;
	}

	mutex_unlock(&u->bind_mutex);

	return rc;
}
/*
 * Poll: always writable (writes never block); readable when the ring is
 * non-empty; POLLERR alone once the ring has overflowed.
 */
static unsigned int evtchn_poll(struct file *file, poll_table *wait)
{
	unsigned int mask = POLLOUT | POLLWRNORM;
	struct per_user_data *u = file->private_data;

	poll_wait(file, &u->evtchn_wait, wait);
	if (u->ring_cons != u->ring_prod)
		mask |= POLLIN | POLLRDNORM;
	if (u->ring_overflow)
		mask = POLLERR;
	return mask;
}
/* Standard SIGIO plumbing; kill_fasync() fires from evtchn_interrupt(). */
static int evtchn_fasync(int fd, struct file *filp, int on)
{
	struct per_user_data *u = filp->private_data;
	return fasync_helper(fd, filp, on, &u->evtchn_async_queue);
}
/*
 * Open /dev/xen/evtchn: allocate the per-file-descriptor state, including
 * one page used as the notification ring.
 *
 * Uses goto-based cleanup instead of repeating the free sequence at each
 * failure point. Returns 0 on success or -ENOMEM on allocation failure.
 */
static int evtchn_open(struct inode *inode, struct file *filp)
{
	struct per_user_data *u;

	u = kzalloc(sizeof(*u), GFP_KERNEL);
	if (u == NULL)
		return -ENOMEM;

	u->name = kasprintf(GFP_KERNEL, "evtchn:%s", current->comm);
	if (u->name == NULL)
		goto err_free_user;

	init_waitqueue_head(&u->evtchn_wait);

	u->ring = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	if (u->ring == NULL)
		goto err_free_name;

	mutex_init(&u->bind_mutex);
	mutex_init(&u->ring_cons_mutex);

	filp->private_data = u;

	return 0;

err_free_name:
	kfree(u->name);
err_free_user:
	kfree(u);
	return -ENOMEM;
}
/*
 * Close: unbind every port this descriptor owns and free its state.
 */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;

	spin_lock_irq(&port_user_lock);

	/*
	 * NOTE(review): the ring page is freed before the ports below are
	 * unbound, while their handlers could still run. The handlers take
	 * port_user_lock, which we hold here, so this looks safe — but
	 * confirm no handler path touches u->ring outside that lock.
	 */
	free_page((unsigned long)u->ring);

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (port_user[i] != u)
			continue;

		evtchn_unbind_from_user(port_user[i], i);
	}

	spin_unlock_irq(&port_user_lock);

	kfree(u->name);
	kfree(u);

	return 0;
}
/* File operations for /dev/xen/evtchn. */
static const struct file_operations evtchn_fops = {
	.owner   = THIS_MODULE,
	.read    = evtchn_read,
	.write   = evtchn_write,
	.unlocked_ioctl = evtchn_ioctl,
	.poll    = evtchn_poll,
	.fasync  = evtchn_fasync,
	.open    = evtchn_open,
	.release = evtchn_release,
};

/* Misc device with a dynamically allocated minor. */
static struct miscdevice evtchn_miscdev = {
	.minor        = MISC_DYNAMIC_MINOR,
	.name         = "evtchn",
	.fops         = &evtchn_fops,
};
/*
 * Module init: register the misc device. Only useful when actually
 * running under Xen, hence the xen_domain() gate.
 */
static int __init evtchn_init(void)
{
	int err;

	if (!xen_domain())
		return -ENODEV;

	spin_lock_init(&port_user_lock);
	memset(port_user, 0, sizeof(port_user));

	/* Create the evtchn misc device (appears as /dev/xen/evtchn). */
	err = misc_register(&evtchn_miscdev);
	if (err != 0) {
		printk(KERN_ALERT "Could not register /dev/misc/evtchn\n");
		return err;
	}

	printk(KERN_INFO "Event-channel device installed.\n");

	return 0;
}
/* Module exit: unregister the misc device. */
static void __exit evtchn_cleanup(void)
{
	misc_deregister(&evtchn_miscdev);
}

module_init(evtchn_init);
module_exit(evtchn_cleanup);

MODULE_LICENSE("GPL");
......@@ -104,9 +104,8 @@ static void do_suspend(void)
goto out;
}
printk("suspending xenbus...\n");
/* XXX use normal device tree? */
xenbus_suspend();
printk(KERN_DEBUG "suspending xenstore...\n");
xs_suspend();
err = stop_machine(xen_suspend, &cancelled, cpumask_of(0));
if (err) {
......@@ -116,9 +115,9 @@ static void do_suspend(void)
if (!cancelled) {
xen_arch_resume();
xenbus_resume();
xs_resume();
} else
xenbus_suspend_cancel();
xs_suspend_cancel();
device_resume(PMSG_RESUME);
......
/*
* copyright (c) 2006 IBM Corporation
* Authored by: Mike D. Day <ncmike@us.ibm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kobject.h>
#include <asm/xen/hypervisor.h>
#include <asm/xen/hypercall.h>
#include <xen/xenbus.h>
#include <xen/interface/xen.h>
#include <xen/interface/version.h>
/* Declare a named hyp_sysfs_attr with conventional read-only/rw modes. */
#define HYPERVISOR_ATTR_RO(_name) \
static struct hyp_sysfs_attr  _name##_attr = __ATTR_RO(_name)

#define HYPERVISOR_ATTR_RW(_name) \
static struct hyp_sysfs_attr _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

/*
 * Attribute type used under /sys/hypervisor: a plain kobject attribute
 * plus driver-specific show/store callbacks, dispatched by
 * hyp_sysfs_show()/hyp_sysfs_store() below.
 */
struct hyp_sysfs_attr {
	struct attribute attr;
	ssize_t (*show)(struct hyp_sysfs_attr *, char *);
	ssize_t (*store)(struct hyp_sysfs_attr *, const char *, size_t);
	void *hyp_attr_data;	/* unused by current attributes */
};
/* /sys/hypervisor/type: always "xen" for this driver. */
static ssize_t type_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	return sprintf(buffer, "xen\n");
}

HYPERVISOR_ATTR_RO(type);

/* Create the "type" attribute file. */
static int __init xen_sysfs_type_init(void)
{
	return sysfs_create_file(hypervisor_kobj, &type_attr.attr);
}

/* Remove it again (error unwind and module exit). */
static void xen_sysfs_type_destroy(void)
{
	sysfs_remove_file(hypervisor_kobj, &type_attr.attr);
}
/* xen version attributes */

/* XENVER_version packs the major number in the high 16 bits. */
static ssize_t major_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if (version)
		return sprintf(buffer, "%d\n", version >> 16);
	return -ENODEV;
}

HYPERVISOR_ATTR_RO(major);

/*
 * NOTE(review): only the low 8 bits of the version are printed here
 * although the major uses the high 16 — confirm the minor field's width
 * against the Xen version ABI.
 */
static ssize_t minor_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int version = HYPERVISOR_xen_version(XENVER_version, NULL);
	if (version)
		return sprintf(buffer, "%d\n", version & 0xff);
	return -ENODEV;
}

HYPERVISOR_ATTR_RO(minor);

/* Extra version string, fetched into a temporary kernel buffer. */
static ssize_t extra_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *extra;

	extra = kmalloc(XEN_EXTRAVERSION_LEN, GFP_KERNEL);
	if (extra) {
		ret = HYPERVISOR_xen_version(XENVER_extraversion, extra);
		if (!ret)
			ret = sprintf(buffer, "%s\n", extra);
		kfree(extra);
	}

	return ret;
}

HYPERVISOR_ATTR_RO(extra);
/* Attributes grouped under /sys/hypervisor/version/. */
static struct attribute *version_attrs[] = {
	&major_attr.attr,
	&minor_attr.attr,
	&extra_attr.attr,
	NULL
};

static struct attribute_group version_group = {
	.name = "version",
	.attrs = version_attrs,
};

static int __init xen_sysfs_version_init(void)
{
	return sysfs_create_group(hypervisor_kobj, &version_group);
}

static void xen_sysfs_version_destroy(void)
{
	sysfs_remove_group(hypervisor_kobj, &version_group);
}
/* UUID */

/*
 * Read this domain's UUID out of xenstore: the "vm" key names the VM
 * record, whose "uuid" node holds the value. Requires xenstored to be
 * up; returns -EBUSY before that.
 */
static ssize_t uuid_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	char *vm, *val;
	int ret;
	extern int xenstored_ready;

	if (!xenstored_ready)
		return -EBUSY;

	vm = xenbus_read(XBT_NIL, "vm", "", NULL);
	if (IS_ERR(vm))
		return PTR_ERR(vm);
	val = xenbus_read(XBT_NIL, vm, "uuid", NULL);
	kfree(vm);
	if (IS_ERR(val))
		return PTR_ERR(val);
	ret = sprintf(buffer, "%s\n", val);
	kfree(val);
	return ret;
}

HYPERVISOR_ATTR_RO(uuid);

static int __init xen_sysfs_uuid_init(void)
{
	return sysfs_create_file(hypervisor_kobj, &uuid_attr.attr);
}

static void xen_sysfs_uuid_destroy(void)
{
	sysfs_remove_file(hypervisor_kobj, &uuid_attr.attr);
}
/* xen compilation attributes */
static ssize_t compiler_show(struct hyp_sysfs_attr *attr, char *buffer)
{
int ret = -ENOMEM;
struct xen_compile_info *info;
info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
if (info) {
ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
if (!ret)
ret = sprintf(buffer, "%s\n", info->compiler);
kfree(info);
}
return ret;
}
HYPERVISOR_ATTR_RO(compiler);
static ssize_t compiled_by_show(struct hyp_sysfs_attr *attr, char *buffer)
{
int ret = -ENOMEM;
struct xen_compile_info *info;
info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
if (info) {
ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
if (!ret)
ret = sprintf(buffer, "%s\n", info->compile_by);
kfree(info);
}
return ret;
}
HYPERVISOR_ATTR_RO(compiled_by);
static ssize_t compile_date_show(struct hyp_sysfs_attr *attr, char *buffer)
{
int ret = -ENOMEM;
struct xen_compile_info *info;
info = kmalloc(sizeof(struct xen_compile_info), GFP_KERNEL);
if (info) {
ret = HYPERVISOR_xen_version(XENVER_compile_info, info);
if (!ret)
ret = sprintf(buffer, "%s\n", info->compile_date);
kfree(info);
}
return ret;
}
HYPERVISOR_ATTR_RO(compile_date);
/* Attributes grouped under /sys/hypervisor/compilation/. */
static struct attribute *xen_compile_attrs[] = {
	&compiler_attr.attr,
	&compiled_by_attr.attr,
	&compile_date_attr.attr,
	NULL
};

static struct attribute_group xen_compilation_group = {
	.name = "compilation",
	.attrs = xen_compile_attrs,
};
int __init static xen_compilation_init(void)
{
return sysfs_create_group(hypervisor_kobj, &xen_compilation_group);
}
static void xen_compilation_destroy(void)
{
sysfs_remove_group(hypervisor_kobj, &xen_compilation_group);
}
/* xen properties info */

/* XENVER_capabilities string, via a temporary kernel buffer. */
static ssize_t capabilities_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *caps;

	caps = kmalloc(XEN_CAPABILITIES_INFO_LEN, GFP_KERNEL);
	if (caps) {
		ret = HYPERVISOR_xen_version(XENVER_capabilities, caps);
		if (!ret)
			ret = sprintf(buffer, "%s\n", caps);
		kfree(caps);
	}

	return ret;
}

HYPERVISOR_ATTR_RO(capabilities);

/* Source-control changeset string of the hypervisor build. */
static ssize_t changeset_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	char *cset;

	cset = kmalloc(XEN_CHANGESET_INFO_LEN, GFP_KERNEL);
	if (cset) {
		ret = HYPERVISOR_xen_version(XENVER_changeset, cset);
		if (!ret)
			ret = sprintf(buffer, "%s\n", cset);
		kfree(cset);
	}

	return ret;
}

HYPERVISOR_ATTR_RO(changeset);

/* Hypervisor virtual start address, from the platform parameters. */
static ssize_t virtual_start_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret = -ENOMEM;
	struct xen_platform_parameters *parms;

	parms = kmalloc(sizeof(struct xen_platform_parameters), GFP_KERNEL);
	if (parms) {
		ret = HYPERVISOR_xen_version(XENVER_platform_parameters,
					     parms);
		if (!ret)
			ret = sprintf(buffer, "%lx\n", parms->virt_start);
		kfree(parms);
	}

	return ret;
}

HYPERVISOR_ATTR_RO(virtual_start);

/* XENVER_pagesize returns the page size directly (no output buffer). */
static ssize_t pagesize_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	int ret;

	ret = HYPERVISOR_xen_version(XENVER_pagesize, NULL);
	if (ret > 0)
		ret = sprintf(buffer, "%x\n", ret);

	return ret;
}

HYPERVISOR_ATTR_RO(pagesize);
/*
 * Query one 32-bit submap of the hypervisor feature flags and print it
 * as 8 hex digits (no newline; features_show() concatenates submaps).
 */
static ssize_t xen_feature_show(int index, char *buffer)
{
	ssize_t ret;
	struct xen_feature_info info;

	info.submap_idx = index;
	ret = HYPERVISOR_xen_version(XENVER_get_features, &info);
	if (!ret)
		ret = sprintf(buffer, "%08x", info.submap);

	return ret;
}

/*
 * Print all feature submaps, most significant first, followed by a
 * newline. A failure on the first submap propagates its error code;
 * a failure after some output merely truncates the listing.
 */
static ssize_t features_show(struct hyp_sysfs_attr *attr, char *buffer)
{
	ssize_t len;
	int i;

	len = 0;
	for (i = XENFEAT_NR_SUBMAPS-1; i >= 0; i--) {
		int ret = xen_feature_show(i, buffer + len);
		if (ret < 0) {
			if (len == 0)
				len = ret;
			break;
		}
		len += ret;
	}
	if (len > 0)
		buffer[len++] = '\n';

	return len;
}

HYPERVISOR_ATTR_RO(features);
/* Attributes grouped under /sys/hypervisor/properties/. */
static struct attribute *xen_properties_attrs[] = {
	&capabilities_attr.attr,
	&changeset_attr.attr,
	&virtual_start_attr.attr,
	&pagesize_attr.attr,
	&features_attr.attr,
	NULL
};

static struct attribute_group xen_properties_group = {
	.name = "properties",
	.attrs = xen_properties_attrs,
};

static int __init xen_properties_init(void)
{
	return sysfs_create_group(hypervisor_kobj, &xen_properties_group);
}

static void xen_properties_destroy(void)
{
	sysfs_remove_group(hypervisor_kobj, &xen_properties_group);
}
/*
 * Create every /sys/hypervisor attribute group, unwinding in reverse
 * order on failure (standard goto-chain error handling).
 */
static int __init hyper_sysfs_init(void)
{
	int ret;

	if (!xen_domain())
		return -ENODEV;

	ret = xen_sysfs_type_init();
	if (ret)
		goto out;
	ret = xen_sysfs_version_init();
	if (ret)
		goto version_out;
	ret = xen_compilation_init();
	if (ret)
		goto comp_out;
	ret = xen_sysfs_uuid_init();
	if (ret)
		goto uuid_out;
	ret = xen_properties_init();
	if (ret)
		goto prop_out;

	goto out;

prop_out:
	xen_sysfs_uuid_destroy();
uuid_out:
	xen_compilation_destroy();
comp_out:
	xen_sysfs_version_destroy();
version_out:
	xen_sysfs_type_destroy();
out:
	return ret;
}

/* Remove everything hyper_sysfs_init() created. */
static void __exit hyper_sysfs_exit(void)
{
	xen_properties_destroy();
	xen_compilation_destroy();
	xen_sysfs_uuid_destroy();
	xen_sysfs_version_destroy();
	xen_sysfs_type_destroy();
}

module_init(hyper_sysfs_init);
module_exit(hyper_sysfs_exit);
/*
 * Generic sysfs dispatch: route kobject show/store calls to the
 * callbacks embedded in each hyp_sysfs_attr.
 */
static ssize_t hyp_sysfs_show(struct kobject *kobj,
			      struct attribute *attr,
			      char *buffer)
{
	struct hyp_sysfs_attr *hyp_attr;
	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
	if (hyp_attr->show)
		return hyp_attr->show(hyp_attr, buffer);
	return 0;	/* attrs without a show callback read as empty */
}

static ssize_t hyp_sysfs_store(struct kobject *kobj,
			       struct attribute *attr,
			       const char *buffer,
			       size_t len)
{
	struct hyp_sysfs_attr *hyp_attr;
	hyp_attr = container_of(attr, struct hyp_sysfs_attr, attr);
	if (hyp_attr->store)
		return hyp_attr->store(hyp_attr, buffer, len);
	return 0;
}

static struct sysfs_ops hyp_sysfs_ops = {
	.show = hyp_sysfs_show,
	.store = hyp_sysfs_store,
};

static struct kobj_type hyp_sysfs_kobj_type = {
	.sysfs_ops = &hyp_sysfs_ops,
};

/* Install our ktype on the global hypervisor kobject (device_initcall). */
static int __init hypervisor_subsys_init(void)
{
	if (!xen_domain())
		return -ENODEV;

	hypervisor_kobj->ktype = &hyp_sysfs_kobj_type;
	return 0;
}
device_initcall(hypervisor_subsys_init);
......@@ -71,6 +71,9 @@ static int xenbus_probe_frontend(const char *type, const char *name);
static void xenbus_dev_shutdown(struct device *_dev);
static int xenbus_dev_suspend(struct device *dev, pm_message_t state);
static int xenbus_dev_resume(struct device *dev);
/* If something in array of ids matches this device, return it. */
static const struct xenbus_device_id *
match_device(const struct xenbus_device_id *arr, struct xenbus_device *dev)
......@@ -188,6 +191,9 @@ static struct xen_bus_type xenbus_frontend = {
.remove = xenbus_dev_remove,
.shutdown = xenbus_dev_shutdown,
.dev_attrs = xenbus_dev_attrs,
.suspend = xenbus_dev_suspend,
.resume = xenbus_dev_resume,
},
};
......@@ -654,6 +660,7 @@ void xenbus_dev_changed(const char *node, struct xen_bus_type *bus)
kfree(root);
}
EXPORT_SYMBOL_GPL(xenbus_dev_changed);
static void frontend_changed(struct xenbus_watch *watch,
const char **vec, unsigned int len)
......@@ -669,7 +676,7 @@ static struct xenbus_watch fe_watch = {
.callback = frontend_changed,
};
static int suspend_dev(struct device *dev, void *data)
static int xenbus_dev_suspend(struct device *dev, pm_message_t state)
{
int err = 0;
struct xenbus_driver *drv;
......@@ -682,35 +689,14 @@ static int suspend_dev(struct device *dev, void *data)
drv = to_xenbus_driver(dev->driver);
xdev = container_of(dev, struct xenbus_device, dev);
if (drv->suspend)
err = drv->suspend(xdev);
err = drv->suspend(xdev, state);
if (err)
printk(KERN_WARNING
"xenbus: suspend %s failed: %i\n", dev_name(dev), err);
return 0;
}
static int suspend_cancel_dev(struct device *dev, void *data)
{
int err = 0;
struct xenbus_driver *drv;
struct xenbus_device *xdev;
DPRINTK("");
if (dev->driver == NULL)
return 0;
drv = to_xenbus_driver(dev->driver);
xdev = container_of(dev, struct xenbus_device, dev);
if (drv->suspend_cancel)
err = drv->suspend_cancel(xdev);
if (err)
printk(KERN_WARNING
"xenbus: suspend_cancel %s failed: %i\n",
dev_name(dev), err);
return 0;
}
static int resume_dev(struct device *dev, void *data)
static int xenbus_dev_resume(struct device *dev)
{
int err;
struct xenbus_driver *drv;
......@@ -755,33 +741,6 @@ static int resume_dev(struct device *dev, void *data)
return 0;
}
void xenbus_suspend(void)
{
DPRINTK("");
bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_dev);
xenbus_backend_suspend(suspend_dev);
xs_suspend();
}
EXPORT_SYMBOL_GPL(xenbus_suspend);
void xenbus_resume(void)
{
xb_init_comms();
xs_resume();
bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, resume_dev);
xenbus_backend_resume(resume_dev);
}
EXPORT_SYMBOL_GPL(xenbus_resume);
void xenbus_suspend_cancel(void)
{
xs_suspend_cancel();
bus_for_each_dev(&xenbus_frontend.bus, NULL, NULL, suspend_cancel_dev);
xenbus_backend_resume(suspend_cancel_dev);
}
EXPORT_SYMBOL_GPL(xenbus_suspend_cancel);
/* A flag to determine if xenstored is 'ready' (i.e. has started) */
int xenstored_ready = 0;
......
......@@ -673,6 +673,8 @@ void xs_resume(void)
struct xenbus_watch *watch;
char token[sizeof(watch) * 2 + 1];
xb_init_comms();
mutex_unlock(&xs_state.response_mutex);
mutex_unlock(&xs_state.request_mutex);
up_write(&xs_state.transaction_mutex);
......
......@@ -20,10 +20,27 @@
MODULE_DESCRIPTION("Xen filesystem");
MODULE_LICENSE("GPL");
/*
 * xenfs "capabilities" file: reads "control_d\n" when this kernel runs as
 * the initial (control) domain, otherwise reads empty.
 */
static ssize_t capabilities_read(struct file *file, char __user *buf,
				 size_t size, loff_t *off)
{
	char *tmp = "";

	if (xen_initial_domain())
		tmp = "control_d\n";

	return simple_read_from_buffer(buf, size, off, tmp, strlen(tmp));
}

static const struct file_operations capabilities_file_ops = {
	.read = capabilities_read,
};
static int xenfs_fill_super(struct super_block *sb, void *data, int silent)
{
static struct tree_descr xenfs_files[] = {
[2] = {"xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR},
[1] = {},
{ "xenbus", &xenbus_file_ops, S_IRUSR|S_IWUSR },
{ "capabilities", &capabilities_file_ops, S_IRUGO },
{""},
};
......
......@@ -8,3 +8,4 @@ header-y += mtd/
header-y += rdma/
header-y += video/
header-y += drm/
header-y += xen/
header-y += evtchn.h
......@@ -53,4 +53,7 @@ bool xen_test_irq_pending(int irq);
irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq);
/* Determine the IRQ which is bound to an event channel */
unsigned irq_from_evtchn(unsigned int evtchn);
#endif /* _XEN_EVENTS_H */
/******************************************************************************
* evtchn.h
*
* Interface to /dev/xen/evtchn.
*
* Copyright (c) 2003-2005, K A Fraser
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation; or, when distributed
* separately from the Linux kernel or incorporated into other
* software packages, subject to the following license:
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this source file (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy, modify,
* merge, publish, distribute, sublicense, and/or sell copies of the Software,
* and to permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#ifndef __LINUX_PUBLIC_EVTCHN_H__
#define __LINUX_PUBLIC_EVTCHN_H__
/*
 * Bind a fresh port to VIRQ @virq.
 * Return allocated port.
 */
#define IOCTL_EVTCHN_BIND_VIRQ				\
	_IOC(_IOC_NONE, 'E', 0, sizeof(struct ioctl_evtchn_bind_virq))
struct ioctl_evtchn_bind_virq {
	unsigned int virq;
};

/*
 * Bind a fresh port to remote <@remote_domain, @remote_port>.
 * Return allocated port.
 */
#define IOCTL_EVTCHN_BIND_INTERDOMAIN			\
	_IOC(_IOC_NONE, 'E', 1, sizeof(struct ioctl_evtchn_bind_interdomain))
struct ioctl_evtchn_bind_interdomain {
	unsigned int remote_domain, remote_port;
};

/*
 * Allocate a fresh port for binding to @remote_domain.
 * Return allocated port.
 */
#define IOCTL_EVTCHN_BIND_UNBOUND_PORT			\
	_IOC(_IOC_NONE, 'E', 2, sizeof(struct ioctl_evtchn_bind_unbound_port))
struct ioctl_evtchn_bind_unbound_port {
	unsigned int remote_domain;
};

/*
 * Unbind previously allocated @port.
 */
#define IOCTL_EVTCHN_UNBIND				\
	_IOC(_IOC_NONE, 'E', 3, sizeof(struct ioctl_evtchn_unbind))
struct ioctl_evtchn_unbind {
	unsigned int port;
};

/*
 * Send a notification to previously allocated @port.
 * (Comment was a copy-paste of the UNBIND description; the driver's
 * handler calls notify_remote_via_evtchn() for this ioctl.)
 */
#define IOCTL_EVTCHN_NOTIFY				\
	_IOC(_IOC_NONE, 'E', 4, sizeof(struct ioctl_evtchn_notify))
struct ioctl_evtchn_notify {
	unsigned int port;
};

/* Clear and reinitialise the event buffer. Clear error condition. */
#define IOCTL_EVTCHN_RESET				\
	_IOC(_IOC_NONE, 'E', 5, 0)
#endif /* __LINUX_PUBLIC_EVTCHN_H__ */
......@@ -57,4 +57,7 @@ struct xen_feature_info {
/* Declares the features reported by XENVER_get_features. */
#include "features.h"
/* arg == NULL; returns host memory page size. */
#define XENVER_pagesize 7
#endif /* __XEN_PUBLIC_VERSION_H__ */
......@@ -91,8 +91,7 @@ struct xenbus_driver {
void (*otherend_changed)(struct xenbus_device *dev,
enum xenbus_state backend_state);
int (*remove)(struct xenbus_device *dev);
int (*suspend)(struct xenbus_device *dev);
int (*suspend_cancel)(struct xenbus_device *dev);
int (*suspend)(struct xenbus_device *dev, pm_message_t state);
int (*resume)(struct xenbus_device *dev);
int (*uevent)(struct xenbus_device *, char **, int, char *, int);
struct device_driver driver;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment