Commit 0ba002bc authored by Hans de Goede, committed by Greg Kroah-Hartman

virt: Add vboxguest driver for Virtual Box Guest integration

This commit adds a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add support for
Virtual Box Guest integration features such as copy-and-paste, seamless
mode and OpenGL pass-through.

This driver also offers vboxguest IPC functionality which is needed
for the vboxfs driver which offers folder sharing support.
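For folder sharing, an in-kernel consumer (the planned shared-folder driver) is
expected to briefly look up the vboxguest device through the exported
vbg_get_gdev()/vbg_put_gdev() helpers. A minimal sketch of such a consumer
(hypothetical caller code, not part of this commit; only the two helpers are
real exports added here):

	/*
	 * Hypothetical consumer sketch: vbg_get_gdev() returns the single
	 * vboxguest device and keeps a mutex held until vbg_put_gdev(),
	 * so the reference must only be held for a short while.
	 */
	struct vbg_dev *gdev = vbg_get_gdev();

	if (IS_ERR(gdev))
		return PTR_ERR(gdev);

	/* ... issue vboxguest IPC / HGCM requests against gdev here ... */

	vbg_put_gdev(gdev);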
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Reviewed-by: Larry Finger <Larry.Finger@lwfinger.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 579db9d4
drivers/virt/Kconfig
@@ -30,4 +30,5 @@ config FSL_HV_MANAGER
4) A kernel interface for receiving callbacks when a managed
partition shuts down.
source "drivers/virt/vboxguest/Kconfig"
endif
drivers/virt/Makefile
@@ -3,3 +3,4 @@
#
obj-$(CONFIG_FSL_HV_MANAGER) += fsl_hypervisor.o
obj-y += vboxguest/
drivers/virt/vboxguest/Kconfig
config VBOXGUEST
tristate "Virtual Box Guest integration support"
depends on X86 && PCI && INPUT
help
This is a driver for the Virtual Box Guest PCI device used in
Virtual Box virtual machines. Enabling this driver will add
support for Virtual Box Guest integration features such as
copy-and-paste, seamless mode and OpenGL pass-through.
This driver also offers vboxguest IPC functionality which is needed
for the vboxfs driver which offers folder sharing support.
If you enable this driver you should also enable the VBOXVIDEO option.
Although it is possible to build this driver into the kernel, it is
advised to build it as a module, so that it can be updated independently
of the kernel. Select M to build this driver as a module.
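A minimal sketch of the expected guest kernel configuration and module load,
assuming the usual kbuild flow (the fragment below is illustrative and not
part of this commit):

	# .config fragment: build vboxguest as a module
	# (X86, PCI and INPUT support must also be enabled)
	CONFIG_VBOXGUEST=m

	# inside the guest, after installing the modules:
	modprobe vboxguest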
drivers/virt/vboxguest/Makefile
vboxguest-y := vboxguest_linux.o vboxguest_core.o vboxguest_utils.o
obj-$(CONFIG_VBOXGUEST) += vboxguest.o
drivers/virt/vboxguest/vboxguest_core.c
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
* vboxguest core guest-device handling code, VBoxGuest.cpp in upstream svn.
*
* Copyright (C) 2007-2016 Oracle Corporation
*/
#include <linux/device.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/vbox_err.h>
#include <linux/vbox_utils.h>
#include <linux/vmalloc.h>
#include "vboxguest_core.h"
#include "vboxguest_version.h"
/* Get the pointer to the first HGCM parameter. */
#define VBG_IOCTL_HGCM_CALL_PARMS(a) \
((struct vmmdev_hgcm_function_parameter *)( \
(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
/* Get the pointer to the first HGCM parameter in a 32-bit request. */
#define VBG_IOCTL_HGCM_CALL_PARMS32(a) \
((struct vmmdev_hgcm_function_parameter32 *)( \
(u8 *)(a) + sizeof(struct vbg_ioctl_hgcm_call)))
#define GUEST_MAPPINGS_TRIES 5
/**
* Reserves memory in which the VMM can relocate any guest mappings
* that are floating around.
*
* This operation is a little bit tricky since the VMM might not accept
* just any address because of address clashes between the three contexts
* it operates in, so we try several times.
*
* Failure to reserve the guest mappings is ignored.
*
* @gdev: The Guest extension device.
*/
static void vbg_guest_mappings_init(struct vbg_dev *gdev)
{
struct vmmdev_hypervisorinfo *req;
void *guest_mappings[GUEST_MAPPINGS_TRIES];
struct page **pages = NULL;
u32 size, hypervisor_size;
int i, rc;
/* Query the required space. */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HYPERVISOR_INFO);
if (!req)
return;
req->hypervisor_start = 0;
req->hypervisor_size = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
goto out;
/*
* The VMM will report back if there is nothing it wants to map, like
* for instance in VT-x and AMD-V mode.
*/
if (req->hypervisor_size == 0)
goto out;
hypervisor_size = req->hypervisor_size;
/* Add 4M so that we can align the vmap to 4MiB as the host requires. */
size = PAGE_ALIGN(req->hypervisor_size) + SZ_4M;
pages = kmalloc(sizeof(*pages) * (size >> PAGE_SHIFT), GFP_KERNEL);
if (!pages)
goto out;
gdev->guest_mappings_dummy_page = alloc_page(GFP_HIGHUSER);
if (!gdev->guest_mappings_dummy_page)
goto out;
for (i = 0; i < (size >> PAGE_SHIFT); i++)
pages[i] = gdev->guest_mappings_dummy_page;
/*
* Try several times, the VMM might not accept some addresses because
* of address clashes between the three contexts.
*/
for (i = 0; i < GUEST_MAPPINGS_TRIES; i++) {
guest_mappings[i] = vmap(pages, (size >> PAGE_SHIFT),
VM_MAP, PAGE_KERNEL_RO);
if (!guest_mappings[i])
break;
req->header.request_type = VMMDEVREQ_SET_HYPERVISOR_INFO;
req->header.rc = VERR_INTERNAL_ERROR;
req->hypervisor_size = hypervisor_size;
req->hypervisor_start =
(unsigned long)PTR_ALIGN(guest_mappings[i], SZ_4M);
rc = vbg_req_perform(gdev, req);
if (rc >= 0) {
gdev->guest_mappings = guest_mappings[i];
break;
}
}
/* Free vmap's from failed attempts. */
while (--i >= 0)
vunmap(guest_mappings[i]);
/* On failure free the dummy-page backing the vmap */
if (!gdev->guest_mappings) {
__free_page(gdev->guest_mappings_dummy_page);
gdev->guest_mappings_dummy_page = NULL;
}
out:
kfree(req);
kfree(pages);
}
/**
* Undo what vbg_guest_mappings_init did.
*
* @gdev: The Guest extension device.
*/
static void vbg_guest_mappings_exit(struct vbg_dev *gdev)
{
struct vmmdev_hypervisorinfo *req;
int rc;
if (!gdev->guest_mappings)
return;
/*
* Tell the host that we're going to free the memory we reserved for
* it, then free it up. (Leak the memory if anything goes wrong here.)
*/
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_HYPERVISOR_INFO);
if (!req)
return;
req->hypervisor_start = 0;
req->hypervisor_size = 0;
rc = vbg_req_perform(gdev, req);
kfree(req);
if (rc < 0) {
vbg_err("%s error: %d\n", __func__, rc);
return;
}
vunmap(gdev->guest_mappings);
gdev->guest_mappings = NULL;
__free_page(gdev->guest_mappings_dummy_page);
gdev->guest_mappings_dummy_page = NULL;
}
/**
* Report the guest information to the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_report_guest_info(struct vbg_dev *gdev)
{
/*
* Allocate and fill in the two guest info reports.
*/
struct vmmdev_guest_info *req1 = NULL;
struct vmmdev_guest_info2 *req2 = NULL;
int rc, ret = -ENOMEM;
req1 = vbg_req_alloc(sizeof(*req1), VMMDEVREQ_REPORT_GUEST_INFO);
req2 = vbg_req_alloc(sizeof(*req2), VMMDEVREQ_REPORT_GUEST_INFO2);
if (!req1 || !req2)
goto out_free;
req1->interface_version = VMMDEV_VERSION;
req1->os_type = VMMDEV_OSTYPE_LINUX26;
#if __BITS_PER_LONG == 64
req1->os_type |= VMMDEV_OSTYPE_X64;
#endif
req2->additions_major = VBG_VERSION_MAJOR;
req2->additions_minor = VBG_VERSION_MINOR;
req2->additions_build = VBG_VERSION_BUILD;
req2->additions_revision = VBG_SVN_REV;
/* (no features defined yet) */
req2->additions_features = 0;
strlcpy(req2->name, VBG_VERSION_STRING,
sizeof(req2->name));
/*
* There are two protocols here:
* 1. INFO2 + INFO1. Supported by >=3.2.51.
* 2. INFO1 and optionally INFO2. The old protocol.
*
* We try protocol 2 first. It will fail with VERR_NOT_SUPPORTED
* if not supported by the VMMDev (message ordering requirement).
*/
rc = vbg_req_perform(gdev, req2);
if (rc >= 0) {
rc = vbg_req_perform(gdev, req1);
} else if (rc == VERR_NOT_SUPPORTED || rc == VERR_NOT_IMPLEMENTED) {
rc = vbg_req_perform(gdev, req1);
if (rc >= 0) {
rc = vbg_req_perform(gdev, req2);
if (rc == VERR_NOT_IMPLEMENTED)
rc = VINF_SUCCESS;
}
}
ret = vbg_status_code_to_errno(rc);
out_free:
kfree(req2);
kfree(req1);
return ret;
}
/**
* Report the guest driver status to the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @active: Flag whether the driver is now active or not.
*/
static int vbg_report_driver_status(struct vbg_dev *gdev, bool active)
{
struct vmmdev_guest_status *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_REPORT_GUEST_STATUS);
if (!req)
return -ENOMEM;
req->facility = VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER;
if (active)
req->status = VBOXGUEST_FACILITY_STATUS_ACTIVE;
else
req->status = VBOXGUEST_FACILITY_STATUS_INACTIVE;
req->flags = 0;
rc = vbg_req_perform(gdev, req);
if (rc == VERR_NOT_IMPLEMENTED) /* Compatibility with older hosts. */
rc = VINF_SUCCESS;
kfree(req);
return vbg_status_code_to_errno(rc);
}
/**
* Inflate the balloon by one chunk. The caller owns the balloon mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
*/
static int vbg_balloon_inflate(struct vbg_dev *gdev, u32 chunk_idx)
{
struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
struct page **pages;
int i, rc, ret;
pages = kmalloc(sizeof(*pages) * VMMDEV_MEMORY_BALLOON_CHUNK_PAGES,
GFP_KERNEL | __GFP_NOWARN);
if (!pages)
return -ENOMEM;
req->header.size = sizeof(*req);
req->inflate = true;
req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++) {
pages[i] = alloc_page(GFP_KERNEL | __GFP_NOWARN);
if (!pages[i]) {
ret = -ENOMEM;
goto out_error;
}
req->phys_page[i] = page_to_phys(pages[i]);
}
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("%s error, rc: %d\n", __func__, rc);
ret = vbg_status_code_to_errno(rc);
goto out_error;
}
gdev->mem_balloon.pages[chunk_idx] = pages;
return 0;
out_error:
while (--i >= 0)
__free_page(pages[i]);
kfree(pages);
return ret;
}
/**
* Deflate the balloon by one chunk. The caller owns the balloon mutex.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @chunk_idx: Index of the chunk.
*/
static int vbg_balloon_deflate(struct vbg_dev *gdev, u32 chunk_idx)
{
struct vmmdev_memballoon_change *req = gdev->mem_balloon.change_req;
struct page **pages = gdev->mem_balloon.pages[chunk_idx];
int i, rc;
req->header.size = sizeof(*req);
req->inflate = false;
req->pages = VMMDEV_MEMORY_BALLOON_CHUNK_PAGES;
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
req->phys_page[i] = page_to_phys(pages[i]);
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("%s error, rc: %d\n", __func__, rc);
return vbg_status_code_to_errno(rc);
}
for (i = 0; i < VMMDEV_MEMORY_BALLOON_CHUNK_PAGES; i++)
__free_page(pages[i]);
kfree(pages);
gdev->mem_balloon.pages[chunk_idx] = NULL;
return 0;
}
/**
* Respond to VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events, query the size
* the host wants the balloon to be and adjust accordingly.
*/
static void vbg_balloon_work(struct work_struct *work)
{
struct vbg_dev *gdev =
container_of(work, struct vbg_dev, mem_balloon.work);
struct vmmdev_memballoon_info *req = gdev->mem_balloon.get_req;
u32 i, chunks;
int rc, ret;
/*
* Setting this bit means that we request the value from the host and
* change the guest memory balloon according to the returned value.
*/
req->event_ack = VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("%s error, rc: %d)\n", __func__, rc);
return;
}
/*
* The host always returns the same maximum amount of chunks, so
* we do this once.
*/
if (!gdev->mem_balloon.max_chunks) {
gdev->mem_balloon.pages =
devm_kcalloc(gdev->dev, req->phys_mem_chunks,
sizeof(struct page **), GFP_KERNEL);
if (!gdev->mem_balloon.pages)
return;
gdev->mem_balloon.max_chunks = req->phys_mem_chunks;
}
chunks = req->balloon_chunks;
if (chunks > gdev->mem_balloon.max_chunks) {
vbg_err("%s: illegal balloon size %u (max=%u)\n",
__func__, chunks, gdev->mem_balloon.max_chunks);
return;
}
if (chunks > gdev->mem_balloon.chunks) {
/* inflate */
for (i = gdev->mem_balloon.chunks; i < chunks; i++) {
ret = vbg_balloon_inflate(gdev, i);
if (ret < 0)
return;
gdev->mem_balloon.chunks++;
}
} else {
/* deflate */
for (i = gdev->mem_balloon.chunks; i-- > chunks;) {
ret = vbg_balloon_deflate(gdev, i);
if (ret < 0)
return;
gdev->mem_balloon.chunks--;
}
}
}
/**
* Callback for heartbeat timer.
*/
static void vbg_heartbeat_timer(struct timer_list *t)
{
struct vbg_dev *gdev = from_timer(gdev, t, heartbeat_timer);
vbg_req_perform(gdev, gdev->guest_heartbeat_req);
mod_timer(&gdev->heartbeat_timer,
msecs_to_jiffies(gdev->heartbeat_interval_ms));
}
/**
* Configure the host to check guest's heartbeat
* and get heartbeat interval from the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @enabled: Set true to enable guest heartbeat checks on host.
*/
static int vbg_heartbeat_host_config(struct vbg_dev *gdev, bool enabled)
{
struct vmmdev_heartbeat *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_HEARTBEAT_CONFIGURE);
if (!req)
return -ENOMEM;
req->enabled = enabled;
req->interval_ns = 0;
rc = vbg_req_perform(gdev, req);
do_div(req->interval_ns, 1000000); /* ns -> ms */
gdev->heartbeat_interval_ms = req->interval_ns;
kfree(req);
return vbg_status_code_to_errno(rc);
}
/**
* Initializes the heartbeat timer. This feature may be disabled by the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_heartbeat_init(struct vbg_dev *gdev)
{
int ret;
/* Make sure that heartbeat checking is disabled if we fail. */
ret = vbg_heartbeat_host_config(gdev, false);
if (ret < 0)
return ret;
ret = vbg_heartbeat_host_config(gdev, true);
if (ret < 0)
return ret;
/*
* Preallocate the request to use it from the timer callback because:
* 1) on Windows vbg_req_alloc must be called at IRQL <= APC_LEVEL
* and the timer callback runs at DISPATCH_LEVEL;
* 2) avoid repeated allocations.
*/
gdev->guest_heartbeat_req = vbg_req_alloc(
sizeof(*gdev->guest_heartbeat_req),
VMMDEVREQ_GUEST_HEARTBEAT);
if (!gdev->guest_heartbeat_req)
return -ENOMEM;
vbg_info("%s: Setting up heartbeat to trigger every %d milliseconds\n",
__func__, gdev->heartbeat_interval_ms);
mod_timer(&gdev->heartbeat_timer, 0);
return 0;
}
/**
* Cleanup heartbeat code, stop HB timer and disable host heartbeat checking.
* @gdev: The Guest extension device.
*/
static void vbg_heartbeat_exit(struct vbg_dev *gdev)
{
del_timer_sync(&gdev->heartbeat_timer);
vbg_heartbeat_host_config(gdev, false);
kfree(gdev->guest_heartbeat_req);
}
/**
* Applies a change to the bit usage tracker.
* Return: true if the mask changed, false if not.
* @tracker: The bit usage tracker.
* @changed: The bits to change.
* @previous: The previous value of the bits.
*/
static bool vbg_track_bit_usage(struct vbg_bit_usage_tracker *tracker,
u32 changed, u32 previous)
{
bool global_change = false;
while (changed) {
u32 bit = ffs(changed) - 1;
u32 bitmask = BIT(bit);
if (bitmask & previous) {
tracker->per_bit_usage[bit] -= 1;
if (tracker->per_bit_usage[bit] == 0) {
global_change = true;
tracker->mask &= ~bitmask;
}
} else {
tracker->per_bit_usage[bit] += 1;
if (tracker->per_bit_usage[bit] == 1) {
global_change = true;
tracker->mask |= bitmask;
}
}
changed &= ~bitmask;
}
return global_change;
}
/**
* Init and termination worker for resetting the event filter on the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @fixed_events: Fixed events (init time).
*/
static int vbg_reset_host_event_filter(struct vbg_dev *gdev,
u32 fixed_events)
{
struct vmmdev_mask *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
if (!req)
return -ENOMEM;
req->not_mask = U32_MAX & ~fixed_events;
req->or_mask = fixed_events;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
return vbg_status_code_to_errno(rc);
}
/**
* Changes the event filter mask for the given session.
*
* This is called in response to VBG_IOCTL_CHANGE_FILTER_MASK as well as to
* do session cleanup. Takes the session spinlock.
*
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The events to add.
* @not_mask: The events to remove.
* @session_termination: Set if we're called by the session cleanup code.
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
*/
static int vbg_set_session_event_filter(struct vbg_dev *gdev,
struct vbg_session *session,
u32 or_mask, u32 not_mask,
bool session_termination)
{
struct vmmdev_mask *req;
u32 changed, previous;
int rc, ret = 0;
/* Allocate a request buffer before taking the spinlock */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_CTL_GUEST_FILTER_MASK);
if (!req) {
if (!session_termination)
return -ENOMEM;
/* Ignore allocation failure, we must do session cleanup. */
}
mutex_lock(&gdev->session_mutex);
/* Apply the changes to the session mask. */
previous = session->event_filter;
session->event_filter |= or_mask;
session->event_filter &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->event_filter;
if (!changed)
goto out;
vbg_track_bit_usage(&gdev->event_filter_tracker, changed, previous);
or_mask = gdev->fixed_events | gdev->event_filter_tracker.mask;
if (gdev->event_filter_host == or_mask || !req)
goto out;
gdev->event_filter_host = or_mask;
req->or_mask = or_mask;
req->not_mask = ~or_mask;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
ret = vbg_status_code_to_errno(rc);
/* Failed, roll back (unless it's session termination time). */
gdev->event_filter_host = U32_MAX;
if (session_termination)
goto out;
vbg_track_bit_usage(&gdev->event_filter_tracker, changed,
session->event_filter);
session->event_filter = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
kfree(req);
return ret;
}
/**
* Init and termination worker for setting the guest capabilities to zero on the host.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_reset_host_capabilities(struct vbg_dev *gdev)
{
struct vmmdev_mask *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
if (!req)
return -ENOMEM;
req->not_mask = U32_MAX;
req->or_mask = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
return vbg_status_code_to_errno(rc);
}
/**
* Sets the guest capabilities for a session. Takes the session spinlock.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The session.
* @or_mask: The capabilities to add.
* @not_mask: The capabilities to remove.
* @session_termination: Set if we're called by the session cleanup code.
* This tweaks the error handling so we perform
* proper session cleanup even if the host
* misbehaves.
*/
static int vbg_set_session_capabilities(struct vbg_dev *gdev,
struct vbg_session *session,
u32 or_mask, u32 not_mask,
bool session_termination)
{
struct vmmdev_mask *req;
u32 changed, previous;
int rc, ret = 0;
/* Allocate a request buffer before taking the spinlock */
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_GUEST_CAPABILITIES);
if (!req) {
if (!session_termination)
return -ENOMEM;
/* Ignore allocation failure, we must do session cleanup. */
}
mutex_lock(&gdev->session_mutex);
/* Apply the changes to the session mask. */
previous = session->guest_caps;
session->guest_caps |= or_mask;
session->guest_caps &= ~not_mask;
/* If anything actually changed, update the global usage counters. */
changed = previous ^ session->guest_caps;
if (!changed)
goto out;
vbg_track_bit_usage(&gdev->guest_caps_tracker, changed, previous);
or_mask = gdev->guest_caps_tracker.mask;
if (gdev->guest_caps_host == or_mask || !req)
goto out;
gdev->guest_caps_host = or_mask;
req->or_mask = or_mask;
req->not_mask = ~or_mask;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
ret = vbg_status_code_to_errno(rc);
/* Failed, roll back (unless it's session termination time). */
gdev->guest_caps_host = U32_MAX;
if (session_termination)
goto out;
vbg_track_bit_usage(&gdev->guest_caps_tracker, changed,
session->guest_caps);
session->guest_caps = previous;
}
out:
mutex_unlock(&gdev->session_mutex);
kfree(req);
return ret;
}
/**
* vbg_query_host_version - get the host feature mask and version information.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
*/
static int vbg_query_host_version(struct vbg_dev *gdev)
{
struct vmmdev_host_version *req;
int rc, ret;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_GET_HOST_VERSION);
if (!req)
return -ENOMEM;
rc = vbg_req_perform(gdev, req);
ret = vbg_status_code_to_errno(rc);
if (ret)
goto out;
snprintf(gdev->host_version, sizeof(gdev->host_version), "%u.%u.%ur%u",
req->major, req->minor, req->build, req->revision);
gdev->host_features = req->features;
vbg_info("vboxguest: host-version: %s %#x\n", gdev->host_version,
gdev->host_features);
if (!(req->features & VMMDEV_HVF_HGCM_PHYS_PAGE_LIST)) {
vbg_err("vboxguest: Error host too old (does not support page-lists)\n");
ret = -ENODEV;
}
out:
kfree(req);
return ret;
}
/**
* Initializes the VBoxGuest device extension when the
* device driver is loaded.
*
* The native code locates the VMMDev on the PCI bus and retrieves
* the MMIO and I/O port ranges; this function will take care of
* mapping the MMIO memory (if present). Upon successful return
* the native code should set up the interrupt handler.
*
* Return: 0 or negative errno value.
*
* @gdev: The Guest extension device.
* @fixed_events: Events that will be enabled upon init and no client
* will ever be allowed to mask.
*/
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events)
{
int ret = -ENOMEM;
gdev->fixed_events = fixed_events | VMMDEV_EVENT_HGCM;
gdev->event_filter_host = U32_MAX; /* forces a report */
gdev->guest_caps_host = U32_MAX; /* forces a report */
init_waitqueue_head(&gdev->event_wq);
init_waitqueue_head(&gdev->hgcm_wq);
spin_lock_init(&gdev->event_spinlock);
mutex_init(&gdev->session_mutex);
mutex_init(&gdev->cancel_req_mutex);
timer_setup(&gdev->heartbeat_timer, vbg_heartbeat_timer, 0);
INIT_WORK(&gdev->mem_balloon.work, vbg_balloon_work);
gdev->mem_balloon.get_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.get_req),
VMMDEVREQ_GET_MEMBALLOON_CHANGE_REQ);
gdev->mem_balloon.change_req =
vbg_req_alloc(sizeof(*gdev->mem_balloon.change_req),
VMMDEVREQ_CHANGE_MEMBALLOON);
gdev->cancel_req =
vbg_req_alloc(sizeof(*(gdev->cancel_req)),
VMMDEVREQ_HGCM_CANCEL2);
gdev->ack_events_req =
vbg_req_alloc(sizeof(*gdev->ack_events_req),
VMMDEVREQ_ACKNOWLEDGE_EVENTS);
gdev->mouse_status_req =
vbg_req_alloc(sizeof(*gdev->mouse_status_req),
VMMDEVREQ_GET_MOUSE_STATUS);
if (!gdev->mem_balloon.get_req || !gdev->mem_balloon.change_req ||
!gdev->cancel_req || !gdev->ack_events_req ||
!gdev->mouse_status_req)
goto err_free_reqs;
ret = vbg_query_host_version(gdev);
if (ret)
goto err_free_reqs;
ret = vbg_report_guest_info(gdev);
if (ret) {
vbg_err("vboxguest: vbg_report_guest_info error: %d\n", ret);
goto err_free_reqs;
}
ret = vbg_reset_host_event_filter(gdev, gdev->fixed_events);
if (ret) {
vbg_err("vboxguest: Error setting fixed event filter: %d\n",
ret);
goto err_free_reqs;
}
ret = vbg_reset_host_capabilities(gdev);
if (ret) {
vbg_err("vboxguest: Error clearing guest capabilities: %d\n",
ret);
goto err_free_reqs;
}
ret = vbg_core_set_mouse_status(gdev, 0);
if (ret) {
vbg_err("vboxguest: Error clearing mouse status: %d\n", ret);
goto err_free_reqs;
}
/* These may fail without requiring the driver init to fail. */
vbg_guest_mappings_init(gdev);
vbg_heartbeat_init(gdev);
/* All Done! */
ret = vbg_report_driver_status(gdev, true);
if (ret < 0)
vbg_err("vboxguest: Error reporting driver status: %d\n", ret);
return 0;
err_free_reqs:
kfree(gdev->mouse_status_req);
kfree(gdev->ack_events_req);
kfree(gdev->cancel_req);
kfree(gdev->mem_balloon.change_req);
kfree(gdev->mem_balloon.get_req);
return ret;
}
/**
* Call this on exit to clean-up vboxguest-core managed resources.
*
* The native code should call this before the driver is unloaded,
* but don't call this on shutdown.
* @gdev: The Guest extension device.
*/
void vbg_core_exit(struct vbg_dev *gdev)
{
vbg_heartbeat_exit(gdev);
vbg_guest_mappings_exit(gdev);
/* Clear the host flags (mouse status etc). */
vbg_reset_host_event_filter(gdev, 0);
vbg_reset_host_capabilities(gdev);
vbg_core_set_mouse_status(gdev, 0);
kfree(gdev->mouse_status_req);
kfree(gdev->ack_events_req);
kfree(gdev->cancel_req);
kfree(gdev->mem_balloon.change_req);
kfree(gdev->mem_balloon.get_req);
}
/**
* Creates a VBoxGuest user session.
*
* vboxguest_linux.c calls this when userspace opens the char-device.
* Return: A pointer to the new session or an ERR_PTR on error.
* @gdev: The Guest extension device.
* @user: Set if this is a session for the vboxuser device.
*/
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user)
{
struct vbg_session *session;
session = kzalloc(sizeof(*session), GFP_KERNEL);
if (!session)
return ERR_PTR(-ENOMEM);
session->gdev = gdev;
session->user_session = user;
return session;
}
/**
* Closes a VBoxGuest session.
* @session: The session to close (and free).
*/
void vbg_core_close_session(struct vbg_session *session)
{
struct vbg_dev *gdev = session->gdev;
int i, rc;
vbg_set_session_capabilities(gdev, session, 0, U32_MAX, true);
vbg_set_session_event_filter(gdev, session, 0, U32_MAX, true);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (!session->hgcm_client_ids[i])
continue;
vbg_hgcm_disconnect(gdev, session->hgcm_client_ids[i], &rc);
}
kfree(session);
}
static int vbg_ioctl_chk(struct vbg_ioctl_hdr *hdr, size_t in_size,
size_t out_size)
{
if (hdr->size_in != (sizeof(*hdr) + in_size) ||
hdr->size_out != (sizeof(*hdr) + out_size))
return -EINVAL;
return 0;
}
static int vbg_ioctl_driver_version_info(
struct vbg_ioctl_driver_version_info *info)
{
const u16 vbg_maj_version = VBG_IOC_VERSION >> 16;
u16 min_maj_version, req_maj_version;
if (vbg_ioctl_chk(&info->hdr, sizeof(info->u.in), sizeof(info->u.out)))
return -EINVAL;
req_maj_version = info->u.in.req_version >> 16;
min_maj_version = info->u.in.min_version >> 16;
if (info->u.in.min_version > info->u.in.req_version ||
min_maj_version != req_maj_version)
return -EINVAL;
if (info->u.in.min_version <= VBG_IOC_VERSION &&
min_maj_version == vbg_maj_version) {
info->u.out.session_version = VBG_IOC_VERSION;
} else {
info->u.out.session_version = U32_MAX;
info->hdr.rc = VERR_VERSION_MISMATCH;
}
info->u.out.driver_version = VBG_IOC_VERSION;
info->u.out.driver_revision = 0;
info->u.out.reserved1 = 0;
info->u.out.reserved2 = 0;
return 0;
}
static bool vbg_wait_event_cond(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
{
unsigned long flags;
bool wakeup;
u32 events;
spin_lock_irqsave(&gdev->event_spinlock, flags);
events = gdev->pending_events & event_mask;
wakeup = events || session->cancel_waiters;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
return wakeup;
}
/* Must be called with the event_spinlock held */
static u32 vbg_consume_events_locked(struct vbg_dev *gdev,
struct vbg_session *session,
u32 event_mask)
{
u32 events = gdev->pending_events & event_mask;
gdev->pending_events &= ~events;
return events;
}
static int vbg_ioctl_wait_for_events(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_wait_for_events *wait)
{
u32 timeout_ms = wait->u.in.timeout_ms;
u32 event_mask = wait->u.in.events;
unsigned long flags;
long timeout;
int ret = 0;
if (vbg_ioctl_chk(&wait->hdr, sizeof(wait->u.in), sizeof(wait->u.out)))
return -EINVAL;
if (timeout_ms == U32_MAX)
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = msecs_to_jiffies(timeout_ms);
wait->u.out.events = 0;
do {
timeout = wait_event_interruptible_timeout(
gdev->event_wq,
vbg_wait_event_cond(gdev, session, event_mask),
timeout);
spin_lock_irqsave(&gdev->event_spinlock, flags);
if (timeout < 0 || session->cancel_waiters) {
ret = -EINTR;
} else if (timeout == 0) {
ret = -ETIMEDOUT;
} else {
wait->u.out.events =
vbg_consume_events_locked(gdev, session, event_mask);
}
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
/*
* Someone else may have consumed the event(s) first, in
* which case we go back to waiting.
*/
} while (ret == 0 && wait->u.out.events == 0);
return ret;
}
static int vbg_ioctl_interrupt_all_wait_events(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hdr *hdr)
{
unsigned long flags;
if (hdr->size_in != sizeof(*hdr) || hdr->size_out != sizeof(*hdr))
return -EINVAL;
spin_lock_irqsave(&gdev->event_spinlock, flags);
session->cancel_waiters = true;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
wake_up(&gdev->event_wq);
return 0;
}
/**
* Checks if the VMM request is allowed in the context of the given session.
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @session: The calling session.
* @req: The request.
*/
static int vbg_req_allowed(struct vbg_dev *gdev, struct vbg_session *session,
const struct vmmdev_request_header *req)
{
const struct vmmdev_guest_status *guest_status;
bool trusted_apps_only;
switch (req->request_type) {
/* Trusted users apps only. */
case VMMDEVREQ_QUERY_CREDENTIALS:
case VMMDEVREQ_REPORT_CREDENTIALS_JUDGEMENT:
case VMMDEVREQ_REGISTER_SHARED_MODULE:
case VMMDEVREQ_UNREGISTER_SHARED_MODULE:
case VMMDEVREQ_WRITE_COREDUMP:
case VMMDEVREQ_GET_CPU_HOTPLUG_REQ:
case VMMDEVREQ_SET_CPU_HOTPLUG_STATUS:
case VMMDEVREQ_CHECK_SHARED_MODULES:
case VMMDEVREQ_GET_PAGE_SHARING_STATUS:
case VMMDEVREQ_DEBUG_IS_PAGE_SHARED:
case VMMDEVREQ_REPORT_GUEST_STATS:
case VMMDEVREQ_REPORT_GUEST_USER_STATE:
case VMMDEVREQ_GET_STATISTICS_CHANGE_REQ:
trusted_apps_only = true;
break;
/* Anyone. */
case VMMDEVREQ_GET_MOUSE_STATUS:
case VMMDEVREQ_SET_MOUSE_STATUS:
case VMMDEVREQ_SET_POINTER_SHAPE:
case VMMDEVREQ_GET_HOST_VERSION:
case VMMDEVREQ_IDLE:
case VMMDEVREQ_GET_HOST_TIME:
case VMMDEVREQ_SET_POWER_STATUS:
case VMMDEVREQ_ACKNOWLEDGE_EVENTS:
case VMMDEVREQ_CTL_GUEST_FILTER_MASK:
case VMMDEVREQ_REPORT_GUEST_STATUS:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ:
case VMMDEVREQ_VIDEMODE_SUPPORTED:
case VMMDEVREQ_GET_HEIGHT_REDUCTION:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQ2:
case VMMDEVREQ_VIDEMODE_SUPPORTED2:
case VMMDEVREQ_VIDEO_ACCEL_ENABLE:
case VMMDEVREQ_VIDEO_ACCEL_FLUSH:
case VMMDEVREQ_VIDEO_SET_VISIBLE_REGION:
case VMMDEVREQ_GET_DISPLAY_CHANGE_REQEX:
case VMMDEVREQ_GET_SEAMLESS_CHANGE_REQ:
case VMMDEVREQ_GET_VRDPCHANGE_REQ:
case VMMDEVREQ_LOG_STRING:
case VMMDEVREQ_GET_SESSION_ID:
trusted_apps_only = false;
break;
/* Depends on the request parameters... */
case VMMDEVREQ_REPORT_GUEST_CAPABILITIES:
guest_status = (const struct vmmdev_guest_status *)req;
switch (guest_status->facility) {
case VBOXGUEST_FACILITY_TYPE_ALL:
case VBOXGUEST_FACILITY_TYPE_VBOXGUEST_DRIVER:
vbg_err("Denying userspace vmm report guest cap. call facility %#08x\n",
guest_status->facility);
return -EPERM;
case VBOXGUEST_FACILITY_TYPE_VBOX_SERVICE:
trusted_apps_only = true;
break;
case VBOXGUEST_FACILITY_TYPE_VBOX_TRAY_CLIENT:
case VBOXGUEST_FACILITY_TYPE_SEAMLESS:
case VBOXGUEST_FACILITY_TYPE_GRAPHICS:
default:
trusted_apps_only = false;
break;
}
break;
/* Anything else is not allowed. */
default:
vbg_err("Denying userspace vmm call type %#08x\n",
req->request_type);
return -EPERM;
}
if (trusted_apps_only && session->user_session) {
vbg_err("Denying userspace vmm call type %#08x through vboxuser device node\n",
req->request_type);
return -EPERM;
}
return 0;
}
static int vbg_ioctl_vmmrequest(struct vbg_dev *gdev,
struct vbg_session *session, void *data)
{
struct vbg_ioctl_hdr *hdr = data;
int ret;
if (hdr->size_in != hdr->size_out)
return -EINVAL;
if (hdr->size_in > VMMDEV_MAX_VMMDEVREQ_SIZE)
return -E2BIG;
if (hdr->type == VBG_IOCTL_HDR_TYPE_DEFAULT)
return -EINVAL;
ret = vbg_req_allowed(gdev, session, data);
if (ret < 0)
return ret;
vbg_req_perform(gdev, data);
WARN_ON(hdr->rc == VINF_HGCM_ASYNC_EXECUTE);
return 0;
}
static int vbg_ioctl_hgcm_connect(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hgcm_connect *conn)
{
u32 client_id;
int i, ret;
if (vbg_ioctl_chk(&conn->hdr, sizeof(conn->u.in), sizeof(conn->u.out)))
return -EINVAL;
/* Find a free place in the sessions clients array and claim it */
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (!session->hgcm_client_ids[i]) {
session->hgcm_client_ids[i] = U32_MAX;
break;
}
}
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EMFILE;
ret = vbg_hgcm_connect(gdev, &conn->u.in.loc, &client_id,
&conn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && conn->hdr.rc >= 0) {
conn->u.out.client_id = client_id;
session->hgcm_client_ids[i] = client_id;
} else {
conn->u.out.client_id = 0;
session->hgcm_client_ids[i] = 0;
}
mutex_unlock(&gdev->session_mutex);
return ret;
}
static int vbg_ioctl_hgcm_disconnect(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_hgcm_disconnect *disconn)
{
u32 client_id;
int i, ret;
if (vbg_ioctl_chk(&disconn->hdr, sizeof(disconn->u.in), 0))
return -EINVAL;
client_id = disconn->u.in.client_id;
if (client_id == 0 || client_id == U32_MAX)
return -EINVAL;
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++) {
if (session->hgcm_client_ids[i] == client_id) {
session->hgcm_client_ids[i] = U32_MAX;
break;
}
}
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids))
return -EINVAL;
ret = vbg_hgcm_disconnect(gdev, client_id, &disconn->hdr.rc);
mutex_lock(&gdev->session_mutex);
if (ret == 0 && disconn->hdr.rc >= 0)
session->hgcm_client_ids[i] = 0;
else
session->hgcm_client_ids[i] = client_id;
mutex_unlock(&gdev->session_mutex);
return ret;
}
static int vbg_ioctl_hgcm_call(struct vbg_dev *gdev,
struct vbg_session *session, bool f32bit,
struct vbg_ioctl_hgcm_call *call)
{
size_t actual_size;
u32 client_id;
int i, ret;
if (call->hdr.size_in < sizeof(*call))
return -EINVAL;
if (call->hdr.size_in != call->hdr.size_out)
return -EINVAL;
if (call->parm_count > VMMDEV_HGCM_MAX_PARMS)
return -E2BIG;
client_id = call->client_id;
if (client_id == 0 || client_id == U32_MAX)
return -EINVAL;
actual_size = sizeof(*call);
if (f32bit)
actual_size += call->parm_count *
sizeof(struct vmmdev_hgcm_function_parameter32);
else
actual_size += call->parm_count *
sizeof(struct vmmdev_hgcm_function_parameter);
if (call->hdr.size_in < actual_size) {
vbg_debug("VBG_IOCTL_HGCM_CALL: hdr.size_in %d required size is %zd\n",
call->hdr.size_in, actual_size);
return -EINVAL;
}
call->hdr.size_out = actual_size;
/*
* Validate the client id.
*/
mutex_lock(&gdev->session_mutex);
for (i = 0; i < ARRAY_SIZE(session->hgcm_client_ids); i++)
if (session->hgcm_client_ids[i] == client_id)
break;
mutex_unlock(&gdev->session_mutex);
if (i >= ARRAY_SIZE(session->hgcm_client_ids)) {
vbg_debug("VBG_IOCTL_HGCM_CALL: INVALID handle. u32Client=%#08x\n",
client_id);
return -EINVAL;
}
if (f32bit)
ret = vbg_hgcm_call32(gdev, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS32(call),
call->parm_count, &call->hdr.rc);
else
ret = vbg_hgcm_call(gdev, client_id,
call->function, call->timeout_ms,
VBG_IOCTL_HGCM_CALL_PARMS(call),
call->parm_count, &call->hdr.rc);
if (ret == -E2BIG) {
/* E2BIG needs to be reported through the hdr.rc field. */
call->hdr.rc = VERR_OUT_OF_RANGE;
ret = 0;
}
if (ret && ret != -EINTR && ret != -ETIMEDOUT)
vbg_err("VBG_IOCTL_HGCM_CALL error: %d\n", ret);
return ret;
}
static int vbg_ioctl_log(struct vbg_ioctl_log *log)
{
if (log->hdr.size_out != sizeof(log->hdr))
return -EINVAL;
vbg_info("%.*s", (int)(log->hdr.size_in - sizeof(log->hdr)),
log->u.in.msg);
return 0;
}
static int vbg_ioctl_change_filter_mask(struct vbg_dev *gdev,
struct vbg_session *session,
struct vbg_ioctl_change_filter *filter)
{
u32 or_mask, not_mask;
if (vbg_ioctl_chk(&filter->hdr, sizeof(filter->u.in), 0))
return -EINVAL;
or_mask = filter->u.in.or_mask;
not_mask = filter->u.in.not_mask;
if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
return -EINVAL;
return vbg_set_session_event_filter(gdev, session, or_mask, not_mask,
false);
}
static int vbg_ioctl_change_guest_capabilities(struct vbg_dev *gdev,
struct vbg_session *session, struct vbg_ioctl_set_guest_caps *caps)
{
u32 or_mask, not_mask;
int ret;
if (vbg_ioctl_chk(&caps->hdr, sizeof(caps->u.in), sizeof(caps->u.out)))
return -EINVAL;
or_mask = caps->u.in.or_mask;
not_mask = caps->u.in.not_mask;
if ((or_mask | not_mask) & ~VMMDEV_EVENT_VALID_EVENT_MASK)
return -EINVAL;
ret = vbg_set_session_capabilities(gdev, session, or_mask, not_mask,
false);
if (ret)
return ret;
caps->u.out.session_caps = session->guest_caps;
caps->u.out.global_caps = gdev->guest_caps_host;
return 0;
}
static int vbg_ioctl_check_balloon(struct vbg_dev *gdev,
struct vbg_ioctl_check_balloon *balloon_info)
{
if (vbg_ioctl_chk(&balloon_info->hdr, 0, sizeof(balloon_info->u.out)))
return -EINVAL;
balloon_info->u.out.balloon_chunks = gdev->mem_balloon.chunks;
/*
* Under Linux we handle VMMDEV_EVENT_BALLOON_CHANGE_REQUEST
* events entirely in the kernel, see vbg_core_isr().
*/
balloon_info->u.out.handle_in_r3 = false;
return 0;
}
static int vbg_ioctl_write_core_dump(struct vbg_dev *gdev,
struct vbg_ioctl_write_coredump *dump)
{
struct vmmdev_write_core_dump *req;
if (vbg_ioctl_chk(&dump->hdr, sizeof(dump->u.in), 0))
return -EINVAL;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_WRITE_COREDUMP);
if (!req)
return -ENOMEM;
req->flags = dump->u.in.flags;
dump->hdr.rc = vbg_req_perform(gdev, req);
kfree(req);
return 0;
}
/**
* Common IOCtl for user to kernel communication.
* Return: 0 or negative errno value.
* @session: The client session.
* @req: The requested function.
* @data: The i/o data buffer, minimum size sizeof(struct vbg_ioctl_hdr).
*/
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data)
{
unsigned int req_no_size = req & ~IOCSIZE_MASK;
struct vbg_dev *gdev = session->gdev;
struct vbg_ioctl_hdr *hdr = data;
bool f32bit = false;
hdr->rc = VINF_SUCCESS;
if (!hdr->size_out)
hdr->size_out = hdr->size_in;
/*
* hdr->version and hdr->size_in / hdr->size_out minimum size are
* already checked by vbg_misc_device_ioctl().
*/
/* For VMMDEV_REQUEST hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT */
if (req_no_size == VBG_IOCTL_VMMDEV_REQUEST(0) ||
req == VBG_IOCTL_VMMDEV_REQUEST_BIG)
return vbg_ioctl_vmmrequest(gdev, session, data);
if (hdr->type != VBG_IOCTL_HDR_TYPE_DEFAULT)
return -EINVAL;
/* Fixed size requests. */
switch (req) {
case VBG_IOCTL_DRIVER_VERSION_INFO:
return vbg_ioctl_driver_version_info(data);
case VBG_IOCTL_HGCM_CONNECT:
return vbg_ioctl_hgcm_connect(gdev, session, data);
case VBG_IOCTL_HGCM_DISCONNECT:
return vbg_ioctl_hgcm_disconnect(gdev, session, data);
case VBG_IOCTL_WAIT_FOR_EVENTS:
return vbg_ioctl_wait_for_events(gdev, session, data);
case VBG_IOCTL_INTERRUPT_ALL_WAIT_FOR_EVENTS:
return vbg_ioctl_interrupt_all_wait_events(gdev, session, data);
case VBG_IOCTL_CHANGE_FILTER_MASK:
return vbg_ioctl_change_filter_mask(gdev, session, data);
case VBG_IOCTL_CHANGE_GUEST_CAPABILITIES:
return vbg_ioctl_change_guest_capabilities(gdev, session, data);
case VBG_IOCTL_CHECK_BALLOON:
return vbg_ioctl_check_balloon(gdev, data);
case VBG_IOCTL_WRITE_CORE_DUMP:
return vbg_ioctl_write_core_dump(gdev, data);
}
/* Variable sized requests. */
switch (req_no_size) {
#ifdef CONFIG_COMPAT
case VBG_IOCTL_HGCM_CALL_32(0):
f32bit = true;
/* Fall through */
#endif
case VBG_IOCTL_HGCM_CALL(0):
return vbg_ioctl_hgcm_call(gdev, session, f32bit, data);
case VBG_IOCTL_LOG(0):
return vbg_ioctl_log(data);
}
vbg_debug("VGDrvCommonIoCtl: Unknown req %#08x\n", req);
return -ENOTTY;
}
/**
* Report guest supported mouse-features to the host.
*
* Return: 0 or negative errno value.
* @gdev: The Guest extension device.
* @features: The set of features to report to the host.
*/
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features)
{
struct vmmdev_mouse_status *req;
int rc;
req = vbg_req_alloc(sizeof(*req), VMMDEVREQ_SET_MOUSE_STATUS);
if (!req)
return -ENOMEM;
req->mouse_features = features;
req->pointer_pos_x = 0;
req->pointer_pos_y = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0)
vbg_err("%s error, rc: %d\n", __func__, rc);
kfree(req);
return vbg_status_code_to_errno(rc);
}
/** Core interrupt service routine. */
irqreturn_t vbg_core_isr(int irq, void *dev_id)
{
struct vbg_dev *gdev = dev_id;
struct vmmdev_events *req = gdev->ack_events_req;
bool mouse_position_changed = false;
unsigned long flags;
u32 events = 0;
int rc;
if (!gdev->mmio->V.V1_04.have_events)
return IRQ_NONE;
/* Get and acknowledge events. */
req->header.rc = VERR_INTERNAL_ERROR;
req->events = 0;
rc = vbg_req_perform(gdev, req);
if (rc < 0) {
vbg_err("Error performing events req, rc: %d\n", rc);
return IRQ_NONE;
}
events = req->events;
if (events & VMMDEV_EVENT_MOUSE_POSITION_CHANGED) {
mouse_position_changed = true;
events &= ~VMMDEV_EVENT_MOUSE_POSITION_CHANGED;
}
if (events & VMMDEV_EVENT_HGCM) {
wake_up(&gdev->hgcm_wq);
events &= ~VMMDEV_EVENT_HGCM;
}
if (events & VMMDEV_EVENT_BALLOON_CHANGE_REQUEST) {
schedule_work(&gdev->mem_balloon.work);
events &= ~VMMDEV_EVENT_BALLOON_CHANGE_REQUEST;
}
if (events) {
spin_lock_irqsave(&gdev->event_spinlock, flags);
gdev->pending_events |= events;
spin_unlock_irqrestore(&gdev->event_spinlock, flags);
wake_up(&gdev->event_wq);
}
if (mouse_position_changed)
vbg_linux_mouse_event(gdev);
return IRQ_HANDLED;
}
drivers/virt/vboxguest/vboxguest_core.h
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/* Copyright (C) 2010-2016 Oracle Corporation */
#ifndef __VBOXGUEST_CORE_H__
#define __VBOXGUEST_CORE_H__
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/workqueue.h>
#include <linux/vboxguest.h>
#include "vmmdev.h"
struct vbg_session;
/** VBox guest memory balloon. */
struct vbg_mem_balloon {
/** Work handling VMMDEV_EVENT_BALLOON_CHANGE_REQUEST events */
struct work_struct work;
/** Pre-allocated vmmdev_memballoon_info req for query */
struct vmmdev_memballoon_info *get_req;
/** Pre-allocated vmmdev_memballoon_change req for inflate / deflate */
struct vmmdev_memballoon_change *change_req;
/** The current number of chunks in the balloon. */
u32 chunks;
/** The maximum number of chunks in the balloon. */
u32 max_chunks;
/**
* Array of pointers to page arrays. A page * array is allocated for
* each chunk when inflating, and freed when deflating.
*/
struct page ***pages;
};
/**
* Per bit usage tracker for a u32 mask.
*
* Used for optimal handling of guest properties and event filter.
*/
struct vbg_bit_usage_tracker {
/** Per bit usage counters. */
u32 per_bit_usage[32];
/** The current mask according to per_bit_usage. */
u32 mask;
};
/** VBox guest device (data) extension. */
struct vbg_dev {
struct device *dev;
/** The base of the adapter I/O ports. */
u16 io_port;
/** Pointer to the mapping of the VMMDev adapter memory. */
struct vmmdev_memory *mmio;
/** Host version */
char host_version[64];
/** Host features */
unsigned int host_features;
/**
* Dummy page and vmap address for reserved kernel virtual-address
* space for the guest mappings, only used on hosts lacking VT-x.
*/
struct page *guest_mappings_dummy_page;
void *guest_mappings;
/** Spinlock protecting pending_events. */
spinlock_t event_spinlock;
/** Preallocated struct vmmdev_events for the IRQ handler. */
struct vmmdev_events *ack_events_req;
/** Wait-for-event list for threads waiting for multiple events. */
wait_queue_head_t event_wq;
/** Mask of pending events. */
u32 pending_events;
/** Wait-for-event list for threads waiting on HGCM async completion. */
wait_queue_head_t hgcm_wq;
/** Pre-allocated hgcm cancel2 req. for cancellation on timeout */
struct vmmdev_hgcm_cancel2 *cancel_req;
/** Mutex protecting cancel_req accesses */
struct mutex cancel_req_mutex;
/** Pre-allocated mouse-status request for the input-device handling. */
struct vmmdev_mouse_status *mouse_status_req;
/** Input device for reporting abs mouse coordinates to the guest. */
struct input_dev *input;
/** Memory balloon information. */
struct vbg_mem_balloon mem_balloon;
/** Lock for session related items in vbg_dev and vbg_session */
struct mutex session_mutex;
/** Events we won't permit anyone to filter out. */
u32 fixed_events;
/**
* Usage counters for the host events (excludes fixed events),
* Protected by session_mutex.
*/
struct vbg_bit_usage_tracker event_filter_tracker;
/**
* The event filter last reported to the host (or UINT32_MAX).
* Protected by session_mutex.
*/
u32 event_filter_host;
/**
* Usage counters for guest capabilities. Indexed by capability bit
* number, one count per session using a capability.
* Protected by session_mutex.
*/
struct vbg_bit_usage_tracker guest_caps_tracker;
/**
* The guest capabilities last reported to the host (or UINT32_MAX).
* Protected by session_mutex.
*/
u32 guest_caps_host;
/**
* Heartbeat timer which fires with interval heartbeat_interval_ms
* and whose handler sends VMMDEVREQ_GUEST_HEARTBEAT to the VMMDev.
*/
struct timer_list heartbeat_timer;
/** Heartbeat timer interval in ms. */
int heartbeat_interval_ms;
/** Preallocated VMMDEVREQ_GUEST_HEARTBEAT request. */
struct vmmdev_request_header *guest_heartbeat_req;
/** "vboxguest" char-device */
struct miscdevice misc_device;
/** "vboxuser" char-device */
struct miscdevice misc_device_user;
};
/** The VBoxGuest per session data. */
struct vbg_session {
/** Pointer to the device extension. */
struct vbg_dev *gdev;
/**
* Array containing HGCM client IDs associated with this session.
* These will be automatically disconnected when the session is closed.
* Protected by vbg_gdev.session_mutex.
*/
u32 hgcm_client_ids[64];
/**
* Host events requested by the session.
* An event type requested in any guest session will be added to the
* host filter. Protected by vbg_gdev.session_mutex.
*/
u32 event_filter;
/**
* Guest capabilities for this session.
* A capability claimed by any guest session will be reported to the
* host. Protected by vbg_gdev.session_mutex.
*/
u32 guest_caps;
/** Does this session belong to a root process or a user one? */
bool user_session;
/** Set on CANCEL_ALL_WAITEVENTS, protected by the vbg_dev event_spinlock. */
bool cancel_waiters;
};
int vbg_core_init(struct vbg_dev *gdev, u32 fixed_events);
void vbg_core_exit(struct vbg_dev *gdev);
struct vbg_session *vbg_core_open_session(struct vbg_dev *gdev, bool user);
void vbg_core_close_session(struct vbg_session *session);
int vbg_core_ioctl(struct vbg_session *session, unsigned int req, void *data);
int vbg_core_set_mouse_status(struct vbg_dev *gdev, u32 features);
irqreturn_t vbg_core_isr(int irq, void *dev_id);
void vbg_linux_mouse_event(struct vbg_dev *gdev);
#endif
drivers/virt/vboxguest/vboxguest_linux.c
/* SPDX-License-Identifier: GPL-2.0 */
/*
* vboxguest linux pci driver, char-dev and input-device code,
*
* Copyright (C) 2006-2016 Oracle Corporation
*/
#include <linux/input.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/vbox_utils.h>
#include "vboxguest_core.h"
/** The device name. */
#define DEVICE_NAME "vboxguest"
/** The device name for the device node open to everyone. */
#define DEVICE_NAME_USER "vboxuser"
/** VirtualBox PCI vendor ID. */
#define VBOX_VENDORID 0x80ee
/** VMMDev PCI card product ID. */
#define VMMDEV_DEVICEID 0xcafe
/** Mutex protecting the global vbg_gdev pointer used by vbg_get/put_gdev. */
static DEFINE_MUTEX(vbg_gdev_mutex);
/** Global vbg_gdev pointer used by vbg_get/put_gdev. */
static struct vbg_dev *vbg_gdev;
static int vbg_misc_device_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
struct vbg_dev *gdev;
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev, misc_device);
session = vbg_core_open_session(gdev, false);
if (IS_ERR(session))
return PTR_ERR(session);
filp->private_data = session;
return 0;
}
static int vbg_misc_device_user_open(struct inode *inode, struct file *filp)
{
struct vbg_session *session;
struct vbg_dev *gdev;
/* misc_open sets filp->private_data to our misc device */
gdev = container_of(filp->private_data, struct vbg_dev,
misc_device_user);
session = vbg_core_open_session(gdev, true);
if (IS_ERR(session))
return PTR_ERR(session);
filp->private_data = session;
return 0;
}
/**
* Close device.
* Return: 0 on success, negated errno on failure.
* @inode: Pointer to inode info structure.
* @filp: Associated file pointer.
*/
static int vbg_misc_device_close(struct inode *inode, struct file *filp)
{
vbg_core_close_session(filp->private_data);
filp->private_data = NULL;
return 0;
}
/**
* Device I/O Control entry point.
* Return: 0 on success, negated errno on failure.
* @filp: Associated file pointer.
* @req: The request specified to ioctl().
* @arg: The argument specified to ioctl().
*/
static long vbg_misc_device_ioctl(struct file *filp, unsigned int req,
unsigned long arg)
{
struct vbg_session *session = filp->private_data;
size_t returned_size, size;
struct vbg_ioctl_hdr hdr;
int ret = 0;
void *buf;
if (copy_from_user(&hdr, (void *)arg, sizeof(hdr)))
return -EFAULT;
if (hdr.version != VBG_IOCTL_HDR_VERSION)
return -EINVAL;
if (hdr.size_in < sizeof(hdr) ||
(hdr.size_out && hdr.size_out < sizeof(hdr)))
return -EINVAL;
size = max(hdr.size_in, hdr.size_out);
if (_IOC_SIZE(req) && _IOC_SIZE(req) != size)
return -EINVAL;
if (size > SZ_16M)
return -E2BIG;
/* __GFP_DMA32 because VBG_IOCTL_VMMDEV_REQUEST passes this to the host */
buf = kmalloc(size, GFP_KERNEL | __GFP_DMA32);
if (!buf)
return -ENOMEM;
if (copy_from_user(buf, (void *)arg, hdr.size_in)) {
ret = -EFAULT;
goto out;
}
if (hdr.size_in < size)
memset(buf + hdr.size_in, 0, size - hdr.size_in);
ret = vbg_core_ioctl(session, req, buf);
if (ret)
goto out;
returned_size = ((struct vbg_ioctl_hdr *)buf)->size_out;
if (returned_size > size) {
vbg_debug("%s: too much output data %zu > %zu\n",
__func__, returned_size, size);
returned_size = size;
}
if (copy_to_user((void *)arg, buf, returned_size) != 0)
ret = -EFAULT;
out:
kfree(buf);
return ret;
}
/** The file_operations structures. */
static const struct file_operations vbg_misc_device_fops = {
.owner = THIS_MODULE,
.open = vbg_misc_device_open,
.release = vbg_misc_device_close,
.unlocked_ioctl = vbg_misc_device_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vbg_misc_device_ioctl,
#endif
};
static const struct file_operations vbg_misc_device_user_fops = {
.owner = THIS_MODULE,
.open = vbg_misc_device_user_open,
.release = vbg_misc_device_close,
.unlocked_ioctl = vbg_misc_device_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = vbg_misc_device_ioctl,
#endif
};
/**
* Called when the input device is first opened.
*
* Sets up absolute mouse reporting.
*/
static int vbg_input_open(struct input_dev *input)
{
struct vbg_dev *gdev = input_get_drvdata(input);
u32 feat = VMMDEV_MOUSE_GUEST_CAN_ABSOLUTE | VMMDEV_MOUSE_NEW_PROTOCOL;
int ret;
ret = vbg_core_set_mouse_status(gdev, feat);
if (ret)
return ret;
return 0;
}
/**
* Called if all open handles to the input device are closed.
*
* Disables absolute reporting.
*/
static void vbg_input_close(struct input_dev *input)
{
struct vbg_dev *gdev = input_get_drvdata(input);
vbg_core_set_mouse_status(gdev, 0);
}
/**
* Creates the kernel input device.
*
* Return: 0 on success, negated errno on failure.
*/
static int vbg_create_input_device(struct vbg_dev *gdev)
{
struct input_dev *input;
input = devm_input_allocate_device(gdev->dev);
if (!input)
return -ENOMEM;
input->id.bustype = BUS_PCI;
input->id.vendor = VBOX_VENDORID;
input->id.product = VMMDEV_DEVICEID;
input->open = vbg_input_open;
input->close = vbg_input_close;
input->dev.parent = gdev->dev;
input->name = "VirtualBox mouse integration";
input_set_abs_params(input, ABS_X, VMMDEV_MOUSE_RANGE_MIN,
VMMDEV_MOUSE_RANGE_MAX, 0, 0);
input_set_abs_params(input, ABS_Y, VMMDEV_MOUSE_RANGE_MIN,
VMMDEV_MOUSE_RANGE_MAX, 0, 0);
input_set_capability(input, EV_KEY, BTN_MOUSE);
input_set_drvdata(input, gdev);
gdev->input = input;
return input_register_device(gdev->input);
}
static ssize_t host_version_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vbg_dev *gdev = dev_get_drvdata(dev);
return sprintf(buf, "%s\n", gdev->host_version);
}
static ssize_t host_features_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vbg_dev *gdev = dev_get_drvdata(dev);
return sprintf(buf, "%#x\n", gdev->host_features);
}
static DEVICE_ATTR_RO(host_version);
static DEVICE_ATTR_RO(host_features);
/**
* Does the PCI detection and init of the device.
*
* Return: 0 on success, negated errno on failure.
*/
static int vbg_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
struct device *dev = &pci->dev;
resource_size_t io, io_len, mmio, mmio_len;
struct vmmdev_memory *vmmdev;
struct vbg_dev *gdev;
int ret;
gdev = devm_kzalloc(dev, sizeof(*gdev), GFP_KERNEL);
if (!gdev)
return -ENOMEM;
ret = pci_enable_device(pci);
if (ret != 0) {
vbg_err("vboxguest: Error enabling device: %d\n", ret);
return ret;
}
ret = -ENODEV;
io = pci_resource_start(pci, 0);
io_len = pci_resource_len(pci, 0);
if (!io || !io_len) {
vbg_err("vboxguest: Error IO-port resource (0) is missing\n");
goto err_disable_pcidev;
}
if (devm_request_region(dev, io, io_len, DEVICE_NAME) == NULL) {
vbg_err("vboxguest: Error could not claim IO resource\n");
ret = -EBUSY;
goto err_disable_pcidev;
}
mmio = pci_resource_start(pci, 1);
mmio_len = pci_resource_len(pci, 1);
if (!mmio || !mmio_len) {
vbg_err("vboxguest: Error MMIO resource (1) is missing\n");
goto err_disable_pcidev;
}
if (devm_request_mem_region(dev, mmio, mmio_len, DEVICE_NAME) == NULL) {
vbg_err("vboxguest: Error could not claim MMIO resource\n");
ret = -EBUSY;
goto err_disable_pcidev;
}
vmmdev = devm_ioremap(dev, mmio, mmio_len);
if (!vmmdev) {
vbg_err("vboxguest: Error ioremap failed; MMIO addr=%p size=%d\n",
(void *)mmio, (int)mmio_len);
goto err_disable_pcidev;
}
/* Validate MMIO region version and size. */
if (vmmdev->version != VMMDEV_MEMORY_VERSION ||
vmmdev->size < 32 || vmmdev->size > mmio_len) {
vbg_err("vboxguest: Bogus VMMDev memory; version=%08x (expected %08x) size=%d (expected <= %d)\n",
vmmdev->version, VMMDEV_MEMORY_VERSION,
vmmdev->size, (int)mmio_len);
goto err_disable_pcidev;
}
gdev->io_port = io;
gdev->mmio = vmmdev;
gdev->dev = dev;
gdev->misc_device.minor = MISC_DYNAMIC_MINOR;
gdev->misc_device.name = DEVICE_NAME;
gdev->misc_device.fops = &vbg_misc_device_fops;
gdev->misc_device_user.minor = MISC_DYNAMIC_MINOR;
gdev->misc_device_user.name = DEVICE_NAME_USER;
gdev->misc_device_user.fops = &vbg_misc_device_user_fops;
ret = vbg_core_init(gdev, VMMDEV_EVENT_MOUSE_POSITION_CHANGED);
if (ret)
goto err_disable_pcidev;
ret = vbg_create_input_device(gdev);
if (ret) {
vbg_err("vboxguest: Error creating input device: %d\n", ret);
goto err_vbg_core_exit;
}
ret = devm_request_irq(dev, pci->irq, vbg_core_isr, IRQF_SHARED,
DEVICE_NAME, gdev);
if (ret) {
vbg_err("vboxguest: Error requesting irq: %d\n", ret);
goto err_vbg_core_exit;
}
ret = misc_register(&gdev->misc_device);
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME, ret);
goto err_vbg_core_exit;
}
ret = misc_register(&gdev->misc_device_user);
if (ret) {
vbg_err("vboxguest: Error misc_register %s failed: %d\n",
DEVICE_NAME_USER, ret);
goto err_unregister_misc_device;
}
mutex_lock(&vbg_gdev_mutex);
if (!vbg_gdev)
vbg_gdev = gdev;
else
ret = -EBUSY;
mutex_unlock(&vbg_gdev_mutex);
if (ret) {
vbg_err("vboxguest: Error more then 1 vbox guest pci device\n");
goto err_unregister_misc_device_user;
}
pci_set_drvdata(pci, gdev);
device_create_file(dev, &dev_attr_host_version);
device_create_file(dev, &dev_attr_host_features);
vbg_info("vboxguest: misc device minor %d, IRQ %d, I/O port %x, MMIO at %p (size %d)\n",
gdev->misc_device.minor, pci->irq, gdev->io_port,
(void *)mmio, (int)mmio_len);
return 0;
err_unregister_misc_device_user:
misc_deregister(&gdev->misc_device_user);
err_unregister_misc_device:
misc_deregister(&gdev->misc_device);
err_vbg_core_exit:
vbg_core_exit(gdev);
err_disable_pcidev:
pci_disable_device(pci);
return ret;
}
static void vbg_pci_remove(struct pci_dev *pci)
{
struct vbg_dev *gdev = pci_get_drvdata(pci);
mutex_lock(&vbg_gdev_mutex);
vbg_gdev = NULL;
mutex_unlock(&vbg_gdev_mutex);
device_remove_file(gdev->dev, &dev_attr_host_features);
device_remove_file(gdev->dev, &dev_attr_host_version);
misc_deregister(&gdev->misc_device_user);
misc_deregister(&gdev->misc_device);
vbg_core_exit(gdev);
pci_disable_device(pci);
}
struct vbg_dev *vbg_get_gdev(void)
{
mutex_lock(&vbg_gdev_mutex);
/*
* Note: on success we keep the mutex locked until vbg_put_gdev();
* this stops vbg_pci_remove() from removing the device from underneath
* vboxsf. vboxsf will only hold a reference for a short while.
*/
if (vbg_gdev)
return vbg_gdev;
mutex_unlock(&vbg_gdev_mutex);
return ERR_PTR(-ENODEV);
}
EXPORT_SYMBOL(vbg_get_gdev);
void vbg_put_gdev(struct vbg_dev *gdev)
{
WARN_ON(gdev != vbg_gdev);
mutex_unlock(&vbg_gdev_mutex);
}
EXPORT_SYMBOL(vbg_put_gdev);
/**
* Callback for mouse events.
*
* This is called at the end of the ISR, after leaving the event spinlock, if
* VMMDEV_EVENT_MOUSE_POSITION_CHANGED was raised by the host.
*
* @gdev: The device extension.
*/
void vbg_linux_mouse_event(struct vbg_dev *gdev)
{
int rc;
/* Report events to the kernel input device */
gdev->mouse_status_req->mouse_features = 0;
gdev->mouse_status_req->pointer_pos_x = 0;
gdev->mouse_status_req->pointer_pos_y = 0;
rc = vbg_req_perform(gdev, gdev->mouse_status_req);
if (rc >= 0) {
input_report_abs(gdev->input, ABS_X,
gdev->mouse_status_req->pointer_pos_x);
input_report_abs(gdev->input, ABS_Y,
gdev->mouse_status_req->pointer_pos_y);
input_sync(gdev->input);
}
}
static const struct pci_device_id vbg_pci_ids[] = {
{ .vendor = VBOX_VENDORID, .device = VMMDEV_DEVICEID },
{}
};
MODULE_DEVICE_TABLE(pci, vbg_pci_ids);
static struct pci_driver vbg_pci_driver = {
.name = DEVICE_NAME,
.id_table = vbg_pci_ids,
.probe = vbg_pci_probe,
.remove = vbg_pci_remove,
};
module_pci_driver(vbg_pci_driver);
MODULE_AUTHOR("Oracle Corporation");
MODULE_DESCRIPTION("Oracle VM VirtualBox Guest Additions for Linux Module");
MODULE_LICENSE("GPL");
drivers/virt/vboxguest/vboxguest_version.h
/* SPDX-License-Identifier: (GPL-2.0 OR CDDL-1.0) */
/*
* VBox Guest Additions version info. This is used by the host to determine
* supported guest-addition features in some cases, so it will need to be kept
* in sync with the VirtualBox upstream versioning scheme when we implement / port
* new features from the upstream out-of-tree vboxguest driver.
*/
#ifndef __VBOX_VERSION_H__
#define __VBOX_VERSION_H__
/* Last synced October 4th 2017 */
#define VBG_VERSION_MAJOR 5
#define VBG_VERSION_MINOR 2
#define VBG_VERSION_BUILD 0
#define VBG_SVN_REV 68940
#define VBG_VERSION_STRING "5.2.0"
#endif
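For reference, here is a minimal userspace sketch of the version handshake
handled by vbg_misc_device_ioctl() / vbg_ioctl_driver_version_info() above.
It is illustrative only: the ioctl number, the VBG_IOCTL_HDR_* constants and
the struct layout are assumed to come from the <linux/vboxguest.h> uapi header
this driver includes, and error handling is abbreviated.

	#include <fcntl.h>
	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/vboxguest.h>

	int main(void)
	{
		struct vbg_ioctl_driver_version_info info;
		int fd = open("/dev/vboxuser", O_RDWR);

		if (fd < 0)
			return 1;

		memset(&info, 0, sizeof(info));
		/* Header sizes must match what vbg_ioctl_chk() expects. */
		info.hdr.size_in = sizeof(info.hdr) + sizeof(info.u.in);
		info.hdr.size_out = sizeof(info.hdr) + sizeof(info.u.out);
		info.hdr.version = VBG_IOCTL_HDR_VERSION;
		info.hdr.type = VBG_IOCTL_HDR_TYPE_DEFAULT;
		/* Requested and minimum ioctl interface versions. */
		info.u.in.req_version = VBG_IOC_VERSION;
		info.u.in.min_version = VBG_IOC_VERSION;

		if (ioctl(fd, VBG_IOCTL_DRIVER_VERSION_INFO, &info) == 0 &&
		    info.hdr.rc >= 0)
			printf("driver version %#x, session version %#x\n",
			       info.u.out.driver_version,
			       info.u.out.session_version);

		close(fd);
		return 0;
	}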