Commit 7f9f2b22 authored by Andy Whitcroft's avatar Andy Whitcroft Committed by Kamal Mostafa
parent bdb9a8f8
Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.0.16-dfsg-2_all.deb
Version: 5.0.16-dfsg-2
Source: http://ports.ubuntu.com/pool/multiverse/v/virtualbox/virtualbox-guest-dkms_5.0.18-dfsg-2build1_all.deb
Version: 5.0.18-dfsg-2build1
PACKAGE_NAME="virtualbox-guest"
PACKAGE_VERSION="5.0.16"
PACKAGE_VERSION="5.0.18"
CLEAN="rm -f *.*o"
BUILT_MODULE_NAME[0]="vboxguest"
BUILT_MODULE_LOCATION[0]="vboxguest"
......
/** @file
*
* VBox Host Guest Shared Memory Interface (HGSMI).
* Host/Guest shared part.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_HGSMI_HGSMI_h
#define ___VBox_HGSMI_HGSMI_h
#include <iprt/assert.h>
#include <iprt/types.h>
#include <VBox/HGSMI/HGSMIDefs.h>
#include <VBox/HGSMI/HGSMIChannels.h>
#include <VBox/HGSMI/HGSMIMemAlloc.h>
/*
* Basic mechanism for the HGSMI is to prepare and pass data buffer to the host and the guest.
* Data inside these buffers are opaque for the HGSMI and are interpreted by higher levels.
*
* Every shared memory buffer passed between the guest/host has the following structure:
*
* HGSMIBUFFERHEADER header;
* uint8_t data[header.u32BufferSize];
* HGSMIBUFFERTAIL tail;
*
* Note: Offset of the 'header' in the memory is used for virtual hardware IO.
*
* Buffers are verified using the offset and the content of the header and the tail,
* which are constant during a call.
*
* Invalid buffers are ignored.
*
* Actual 'data' is not verified, as it is expected that the data can be changed by the
* called function.
*
* Since only the offset of the buffer is passed in an IO operation, the header and tail
* must contain:
* * size of data in this buffer;
* * checksum for buffer verification.
*
* For segmented transfers:
* * the sequence identifier;
* * offset of the current segment in the sequence;
* * total bytes in the transfer.
*
* Additionally contains:
* * the channel ID;
* * the channel information.
*/
/* The HGSMI heap: the shared memory area together with the memory
 * allocator state used to carve guest/host buffers out of it. */
typedef struct HGSMIHEAP
{
HGSMIAREA area; /* Description of the underlying shared memory area. */
HGSMIMADATA ma; /* Memory allocator state for this area. */
} HGSMIHEAP;
/* The size of the array of channels. Array indexes are uint8_t. Note: the value must not be changed. */
#define HGSMI_NUMBER_OF_CHANNELS 0x100
/* Channel handler called when the guest submits a buffer. */
typedef DECLCALLBACK(int) FNHGSMICHANNELHANDLER(void *pvHandler, uint16_t u16ChannelInfo, void *pvBuffer, HGSMISIZE cbBuffer);
typedef FNHGSMICHANNELHANDLER *PFNHGSMICHANNELHANDLER;
/* Information about a handler: pfn + context. */
/* Information about a channel handler: callback pointer + opaque context.
 * The context is passed back as the first argument of the callback. */
typedef struct _HGSMICHANNELHANDLER
{
PFNHGSMICHANNELHANDLER pfnHandler; /* The handler callback. */
void *pvHandler; /* Opaque context passed to pfnHandler. */
} HGSMICHANNELHANDLER;
/* Channel description. */
/* Channel description: handler plus identification of the channel. */
typedef struct _HGSMICHANNEL
{
HGSMICHANNELHANDLER handler; /* The channel handler (pfn + context). */
const char *pszName; /* NULL for hardcoded channels or RTStrDup'ed name. */
uint8_t u8Channel; /* The channel id, equal to the channel index in the array. */
uint8_t u8Flags; /* HGSMI_CH_F_* flags. */
} HGSMICHANNEL;
/* The table of all channels, one entry per possible 8-bit channel id. */
typedef struct _HGSMICHANNELINFO
{
HGSMICHANNEL Channels[HGSMI_NUMBER_OF_CHANNELS]; /* Channel handlers indexed by the channel id.
* The array is accessed under the instance lock.
*/
} HGSMICHANNELINFO;
RT_C_DECLS_BEGIN
/* Returns the header of the shared buffer starting at pvBuffer. */
DECLINLINE(HGSMIBUFFERHEADER *) HGSMIBufferHeaderFromPtr(void *pvBuffer)
{
    /* The header is the first thing in a buffer, so the buffer pointer
     * and the header pointer are the same address. */
    HGSMIBUFFERHEADER *pHeader = (HGSMIBUFFERHEADER *)pvBuffer;
    return pHeader;
}
/* Returns the payload of the shared buffer starting at pvBuffer. */
DECLINLINE(uint8_t *) HGSMIBufferDataFromPtr(void *pvBuffer)
{
    /* The payload starts right after the fixed-size header. */
    uint8_t *pu8Buffer = (uint8_t *)pvBuffer;
    return &pu8Buffer[sizeof(HGSMIBUFFERHEADER)];
}
/* Returns the tail of a buffer whose payload is u32DataSize bytes long. */
DECLINLINE(HGSMIBUFFERTAIL *) HGSMIBufferTailFromPtr(void *pvBuffer,
                                                     uint32_t u32DataSize)
{
    /* The tail immediately follows the variable-size payload. */
    uint8_t *pu8Data = HGSMIBufferDataFromPtr(pvBuffer);
    return (HGSMIBUFFERTAIL *)&pu8Data[u32DataSize];
}
/* Smallest possible buffer: a header and a tail with no payload. */
DECLINLINE(HGSMISIZE) HGSMIBufferMinimumSize(void)
{
    const HGSMISIZE cbHeader = (HGSMISIZE)sizeof(HGSMIBUFFERHEADER);
    const HGSMISIZE cbTail   = (HGSMISIZE)sizeof(HGSMIBUFFERTAIL);
    return cbHeader + cbTail;
}
/* Returns the header of the buffer whose payload starts at pvData. */
DECLINLINE(HGSMIBUFFERHEADER *) HGSMIBufferHeaderFromData(const void *pvData)
{
    /* Step back over the fixed-size header that precedes the payload. */
    uint8_t *pu8Data = (uint8_t *)pvData;
    return (HGSMIBUFFERHEADER *)(pu8Data - sizeof(HGSMIBUFFERHEADER));
}
/* Total buffer size needed for a payload of u32DataSize bytes. */
DECLINLINE(HGSMISIZE) HGSMIBufferRequiredSize(uint32_t u32DataSize)
{
    /* Payload size plus the fixed header/tail overhead. */
    return u32DataSize + HGSMIBufferMinimumSize();
}
/* Translates an address within the area into an area offset. */
DECLINLINE(HGSMIOFFSET) HGSMIPointerToOffset(const HGSMIAREA *pArea,
                                             const void *pv)
{
    const HGSMIOFFSET offFromBase = (HGSMIOFFSET)((uint8_t *)pv - pArea->pu8Base);
    return pArea->offBase + offFromBase;
}
/* Translates an area offset back into an address within the area. */
DECLINLINE(void *) HGSMIOffsetToPointer(const HGSMIAREA *pArea,
                                        HGSMIOFFSET offBuffer)
{
    const HGSMIOFFSET offFromBase = offBuffer - pArea->offBase;
    return &pArea->pu8Base[offFromBase];
}
/* Returns the payload of the buffer located at the given area offset. */
DECLINLINE(uint8_t *) HGSMIBufferDataFromOffset(const HGSMIAREA *pArea,
                                                HGSMIOFFSET offBuffer)
{
    /* Resolve the buffer address, then skip the header to reach the payload. */
    return HGSMIBufferDataFromPtr(HGSMIOffsetToPointer(pArea, offBuffer));
}
/* Returns the area offset of the buffer whose payload starts at pvData. */
DECLINLINE(HGSMIOFFSET) HGSMIBufferOffsetFromData(const HGSMIAREA *pArea,
                                                  void *pvData)
{
    /* Step back from the payload to the header, then convert to an offset. */
    return HGSMIPointerToOffset(pArea, HGSMIBufferHeaderFromData(pvData));
}
/* Resolves the buffer at offBuffer, stores its u16ChannelInfo into
 * *pu16ChannelInfo, and returns a pointer to the buffer payload. */
DECLINLINE(uint8_t *) HGSMIBufferDataAndChInfoFromOffset(const HGSMIAREA *pArea,
                                                         HGSMIOFFSET offBuffer,
                                                         uint16_t *pu16ChannelInfo)
{
    void *pvBuffer = HGSMIOffsetToPointer(pArea, offBuffer);
    *pu16ChannelInfo = ((HGSMIBUFFERHEADER *)pvBuffer)->u16ChannelInfo;
    return HGSMIBufferDataFromPtr(pvBuffer);
}
/* Computes the verification checksum over the buffer offset, the header
 * and the first bytes of the tail (see HGSMIBUFFERTAIL::u32Checksum). */
uint32_t HGSMIChecksum(HGSMIOFFSET offBuffer,
const HGSMIBUFFERHEADER *pHeader,
const HGSMIBUFFERTAIL *pTail);
/* Initializes the HGSMIAREA description for the given memory region. */
int HGSMIAreaInitialize(HGSMIAREA *pArea,
void *pvBase,
HGSMISIZE cbArea,
HGSMIOFFSET offBase);
/* Resets the area description. */
void HGSMIAreaClear(HGSMIAREA *pArea);
/* Checks whether the offset lies within the area: [offBase, offBase + cbArea). */
DECLINLINE(bool) HGSMIAreaContainsOffset(const HGSMIAREA *pArea, HGSMIOFFSET off)
{
    if (off < pArea->offBase)
        return false;
    return off - pArea->offBase < pArea->cbArea;
}
/* Checks whether the pointer lies within the area's memory range. */
DECLINLINE(bool) HGSMIAreaContainsPointer(const HGSMIAREA *pArea, const void *pv)
{
    /* Compare as integers so an out-of-area pointer is handled safely. */
    const uintptr_t uPtr  = (uintptr_t)pv;
    const uintptr_t uBase = (uintptr_t)pArea->pu8Base;
    return uPtr >= uBase && uPtr - uBase < pArea->cbArea;
}
/* Fills in the header and tail of a single (non-sequence) buffer of
 * cbBuffer bytes and returns its offset in the area. */
HGSMIOFFSET HGSMIBufferInitializeSingle(const HGSMIAREA *pArea,
HGSMIBUFFERHEADER *pHeader,
HGSMISIZE cbBuffer,
uint8_t u8Channel,
uint16_t u16ChannelInfo);
/* Sets up a heap over the given memory region, using pEnv for system
 * memory allocations. */
int HGSMIHeapSetup(HGSMIHEAP *pHeap,
void *pvBase,
HGSMISIZE cbArea,
HGSMIOFFSET offBase,
const HGSMIENV *pEnv);
/* Tears down a heap created by HGSMIHeapSetup. */
void HGSMIHeapDestroy(HGSMIHEAP *pHeap);
/* Allocates a raw buffer of cbBuffer bytes from the heap. */
void *HGSMIHeapBufferAlloc(HGSMIHEAP *pHeap,
HGSMISIZE cbBuffer);
/* Frees a raw buffer obtained from HGSMIHeapBufferAlloc. */
void HGSMIHeapBufferFree(HGSMIHEAP *pHeap,
void *pvBuf);
/* Allocates a buffer for cbData payload bytes and returns a pointer to
 * the payload (header/tail are managed by the heap). */
void *HGSMIHeapAlloc(HGSMIHEAP *pHeap,
HGSMISIZE cbData,
uint8_t u8Channel,
uint16_t u16ChannelInfo);
/* Frees a payload pointer obtained from HGSMIHeapAlloc. */
void HGSMIHeapFree(HGSMIHEAP *pHeap,
void *pvData);
/* Read-only view of the heap's shared memory area description. */
DECLINLINE(const HGSMIAREA *) HGSMIHeapArea(HGSMIHEAP *pHeap)
{
    const HGSMIAREA *pArea = &pHeap->area;
    return pArea;
}
/* Base offset of the heap's shared memory area. */
DECLINLINE(HGSMIOFFSET) HGSMIHeapOffset(HGSMIHEAP *pHeap)
{
    const HGSMIAREA *pArea = HGSMIHeapArea(pHeap);
    return pArea->offBase;
}
/* Size in bytes of the heap's shared memory area. */
DECLINLINE(HGSMISIZE) HGSMIHeapSize(HGSMIHEAP *pHeap)
{
    const HGSMIAREA *pArea = HGSMIHeapArea(pHeap);
    return pArea->cbArea;
}
/* Maps a payload pointer inside this heap to its buffer offset. */
DECLINLINE(HGSMIOFFSET) HGSMIHeapBufferOffset(HGSMIHEAP *pHeap,
                                              void *pvData)
{
    const HGSMIAREA *pArea = HGSMIHeapArea(pHeap);
    return HGSMIBufferOffsetFromData(pArea, pvData);
}
/* Looks up a channel descriptor by its 8-bit id. */
HGSMICHANNEL *HGSMIChannelFindById(HGSMICHANNELINFO *pChannelInfo,
uint8_t u8Channel);
/* Registers a handler (callback + context) for the given channel id. */
int HGSMIChannelRegister(HGSMICHANNELINFO *pChannelInfo,
uint8_t u8Channel,
const char *pszName,
PFNHGSMICHANNELHANDLER pfnChannelHandler,
void *pvChannelHandler);
/* Verifies the buffer at offBuffer and routes it to the handler of the
 * channel named in the buffer header; invalid buffers are ignored. */
int HGSMIBufferProcess(const HGSMIAREA *pArea,
HGSMICHANNELINFO *pChannelInfo,
HGSMIOFFSET offBuffer);
RT_C_DECLS_END
#endif /* !___VBox_HGSMI_HGSMI_h */
/** @file
* VBox Host Guest Shared Memory Interface (HGSMI), Host/Guest shared part.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_HGSMI_HGSMIChSetup_h
#define ___VBox_HGSMI_HGSMIChSetup_h
#include <VBox/HGSMI/HGSMI.h>
/* HGSMI setup and configuration channel commands and data structures. */
#define HGSMI_CC_HOST_FLAGS_LOCATION 0 /* Tell the host the location of HGSMIHOSTFLAGS structure,
* where the host can write information about pending
* buffers, etc, and which can be quickly polled by
* the guest without a need to port IO.
*/
/* Describes the guest buffer that holds the HGSMIHOSTFLAGS structure,
 * as reported to the host via HGSMI_CC_HOST_FLAGS_LOCATION. */
typedef struct _HGSMIBUFFERLOCATION
{
HGSMIOFFSET offLocation; /* Offset of the buffer in the shared memory area. */
HGSMISIZE cbLocation; /* Size of the buffer in bytes. */
} HGSMIBUFFERLOCATION;
AssertCompileSize(HGSMIBUFFERLOCATION, 8); /* Layout is part of the guest/host ABI. */
/* HGSMI setup and configuration data structures. */
/* host->guest commands pending, should be accessed under FIFO lock only */
#define HGSMIHOSTFLAGS_COMMANDS_PENDING 0x1
/* IRQ is fired, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_IRQ 0x2
#ifdef VBOX_WITH_WDDM
/* one or more guest commands is completed, should be accessed under FIFO lock only */
# define HGSMIHOSTFLAGS_GCOMMAND_COMPLETED 0x4
/* watchdog timer interrupt flag (used for debugging), should be accessed under VGAState::lock only */
# define HGSMIHOSTFLAGS_WATCHDOG 0x8
#endif
/* vsync interrupt flag, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_VSYNC 0x10
/** monitor hotplug flag, should be accessed under VGAState::lock only */
#define HGSMIHOSTFLAGS_HOTPLUG 0x20
/** Cursor capability state change flag, should be accessed under
* VGAState::lock only. @see VBVACONF32. */
#define HGSMIHOSTFLAGS_CURSOR_CAPABILITIES 0x40
/* Host-written flags structure polled by the guest (HGSMIHOSTFLAGS_*). */
typedef struct _HGSMIHOSTFLAGS
{
/* Host flags can be accessed and modified in multiple threads concurrently,
 * e.g. the CrOpenGL HGCM and GUI threads when completing HGSMI 3D and Video Accel
 * respectively, the EMT thread when dealing with HGSMI command processing, etc.
 * Besides setting/clearing flags atomically, each flag has its own special sync
 * restrictions; see the comments for the flag definitions above. */
volatile uint32_t u32HostFlags;
uint32_t au32Reserved[3]; /* Reserved; pads the structure to 16 bytes. */
} HGSMIHOSTFLAGS;
AssertCompileSize(HGSMIHOSTFLAGS, 16); /* Layout is part of the guest/host ABI. */
#endif
/** @file
*
* VBox Host Guest Shared Memory Interface (HGSMI).
* Host/Guest shared part.
* Channel identifiers.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef __HGSMIChannels_h__
#define __HGSMIChannels_h__
/* Each channel has an 8 bit identifier. There are a number of predefined
* (hardcoded) channels.
*
* HGSMI_CH_HGSMI channel can be used to map a string channel identifier
* to a free 8 bit numerical value. Values are allocated in the range
* [HGSMI_CH_STRING_FIRST;HGSMI_CH_STRING_LAST].
*
*/
/* Predefined channel identifiers. Used internally by VBOX to simplify the channel setup. */
#define HGSMI_CH_RESERVED (0x00) /* A reserved channel value. */
#define HGSMI_CH_HGSMI (0x01) /* HGSMI: setup and configuration channel. */
#define HGSMI_CH_VBVA (0x02) /* Graphics: VBVA. */
#define HGSMI_CH_SEAMLESS (0x03) /* Graphics: Seamless with a single guest region. */
#define HGSMI_CH_SEAMLESS2 (0x04) /* Graphics: Seamless with separate host windows. */
#define HGSMI_CH_OPENGL (0x05) /* Graphics: OpenGL HW acceleration. */
/* Dynamically allocated channel identifiers. */
#define HGSMI_CH_STRING_FIRST (0x20) /* The first channel index to be used for string mappings (inclusive). */
#define HGSMI_CH_STRING_LAST (0xff) /* The last channel index for string mappings (inclusive). */
/* Check whether the channel identifier is allocated for a dynamic channel,
 * i.e. any id >= HGSMI_CH_STRING_FIRST (any of bits 5-7 set). */
#define HGSMI_IS_DYNAMIC_CHANNEL(_channel) (((uint8_t)(_channel) & 0xE0) != 0)
#endif /* !__HGSMIChannels_h__*/
/** @file
*
* VBox Host Guest Shared Memory Interface (HGSMI).
* Host/Guest shared part: types and defines.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_HGSMI_HGSMIDefs_h
#define ___VBox_HGSMI_HGSMIDefs_h
#include <iprt/assert.h>
#include <iprt/types.h>
/* HGSMI uses 32 bit offsets and sizes. */
typedef uint32_t HGSMISIZE;
typedef uint32_t HGSMIOFFSET;
#define HGSMIOFFSET_VOID ((HGSMIOFFSET)~0)
/* Describes a shared memory area buffer.
* Used for calculations with offsets and for buffers verification.
*/
/* Describes a shared memory area buffer.
 * Used for calculations with offsets and for buffers verification.
 */
typedef struct HGSMIAREA
{
uint8_t *pu8Base; /* The starting address of the area. Corresponds to offset 'offBase'. */
HGSMIOFFSET offBase; /* The starting offset of the area. */
HGSMIOFFSET offLast; /* The last valid offset:
* offBase + cbArea - 1 - (sizeof(header) + sizeof(tail)).
*/
HGSMISIZE cbArea; /* Size of the area in bytes. */
} HGSMIAREA;
/* The buffer description flags. */
#define HGSMI_BUFFER_HEADER_F_SEQ_MASK 0x03 /* Buffer sequence type mask. */
#define HGSMI_BUFFER_HEADER_F_SEQ_SINGLE 0x00 /* Single buffer, not a part of a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_START 0x01 /* The first buffer in a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE 0x02 /* A middle buffer in a sequence. */
#define HGSMI_BUFFER_HEADER_F_SEQ_END 0x03 /* The last buffer in a sequence. */
#pragma pack(1) /* Exact layout: these structures are shared with the host. */
/* 16 bytes buffer header. */
typedef struct HGSMIBUFFERHEADER
{
uint32_t u32DataSize; /* Size of data that follows the header. */
uint8_t u8Flags; /* The buffer description: HGSMI_BUFFER_HEADER_F_* */
uint8_t u8Channel; /* The channel the data must be routed to. */
uint16_t u16ChannelInfo; /* Opaque to the HGSMI, used by the channel. */
union {
uint8_t au8Union[8]; /* Opaque placeholder to make the union 8 bytes. */
struct
{ /* HGSMI_BUFFER_HEADER_F_SEQ_SINGLE */
uint32_t u32Reserved1; /* A reserved field, initialize to 0. */
uint32_t u32Reserved2; /* A reserved field, initialize to 0. */
} Buffer;
struct
{ /* HGSMI_BUFFER_HEADER_F_SEQ_START */
uint32_t u32SequenceNumber; /* The sequence number, the same for all buffers in the sequence. */
uint32_t u32SequenceSize; /* The total size of the sequence. */
} SequenceStart;
struct
{ /* HGSMI_BUFFER_HEADER_F_SEQ_CONTINUE and HGSMI_BUFFER_HEADER_F_SEQ_END */
uint32_t u32SequenceNumber; /* The sequence number, the same for all buffers in the sequence. */
uint32_t u32SequenceOffset; /* Data offset in the entire sequence. */
} SequenceContinue;
} u;
} HGSMIBUFFERHEADER;
/* 8 bytes buffer tail. */
typedef struct HGSMIBUFFERTAIL
{
uint32_t u32Reserved; /* Reserved, must be initialized to 0. */
uint32_t u32Checksum; /* Verifier for the buffer header and offset and for the first 4 bytes of the tail. */
} HGSMIBUFFERTAIL;
#pragma pack()
AssertCompileSize(HGSMIBUFFERHEADER, 16); /* Layout is part of the guest/host ABI. */
AssertCompileSize(HGSMIBUFFERTAIL, 8);
/* The size of the array of channels. Array indexes are uint8_t. Note: the value must not be changed. */
#define HGSMI_NUMBER_OF_CHANNELS 0x100
/* Callbacks supplied by the environment so that shared HGSMI code can
 * allocate and free system memory. */
typedef struct HGSMIENV
{
/* Environment context pointer, passed as the first argument of the callbacks below. */
void *pvEnv;
/* Allocate cb bytes of system memory. */
DECLCALLBACKMEMBER(void *, pfnAlloc)(void *pvEnv, HGSMISIZE cb);
/* Free system memory previously returned by pfnAlloc. */
DECLCALLBACKMEMBER(void, pfnFree)(void *pvEnv, void *pv);
} HGSMIENV;
#endif /* !___VBox_HGSMI_HGSMIDefs_h */
/** @file
*
* VBox Host Guest Shared Memory Interface (HGSMI).
* Memory allocator.
*/
/*
* Copyright (C) 2014-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_HGSMI_HGSMIMemAlloc_h
#define ___VBox_HGSMI_HGSMIMemAlloc_h
#include <VBox/HGSMI/HGSMIDefs.h>
#include <iprt/list.h>
/* Descriptor. */
#define HGSMI_MA_DESC_OFFSET_MASK UINT32_C(0xFFFFFFE0)
#define HGSMI_MA_DESC_FREE_MASK UINT32_C(0x00000010)
#define HGSMI_MA_DESC_ORDER_MASK UINT32_C(0x0000000F)
#define HGSMI_MA_DESC_OFFSET(d) ((d) & HGSMI_MA_DESC_OFFSET_MASK)
#define HGSMI_MA_DESC_IS_FREE(d) (((d) & HGSMI_MA_DESC_FREE_MASK) != 0)
#define HGSMI_MA_DESC_ORDER(d) ((d) & HGSMI_MA_DESC_ORDER_MASK)
#define HGSMI_MA_DESC_ORDER_BASE UINT32_C(5)
#define HGSMI_MA_BLOCK_SIZE_MIN (UINT32_C(1) << (HGSMI_MA_DESC_ORDER_BASE + 0))
#define HGSMI_MA_BLOCK_SIZE_MAX (UINT32_C(1) << (HGSMI_MA_DESC_ORDER_BASE + HGSMI_MA_DESC_ORDER_MASK))
/* HGSMI_MA_DESC_ORDER_BASE must correspond to HGSMI_MA_DESC_OFFSET_MASK. */
AssertCompile((~HGSMI_MA_DESC_OFFSET_MASK + 1) == HGSMI_MA_BLOCK_SIZE_MIN);
/* One block managed by the memory allocator: list linkage plus a packed
 * descriptor (offset | free flag | size order, see HGSMI_MA_DESC_*). */
typedef struct HGSMIMABLOCK
{
RTLISTNODE nodeBlock; /* Entry in the sorted list of all blocks (HGSMIMADATA::listBlocks). */
RTLISTNODE nodeFree; /* Entry in the free list for this block's order. */
HGSMIOFFSET descriptor; /* Packed offset/free/order; decode with HGSMI_MA_DESC_* macros. */
} HGSMIMABLOCK;
/* State of the HGSMI block memory allocator. */
typedef struct HGSMIMADATA
{
HGSMIAREA area; /* The managed shared memory area. */
HGSMIENV env; /* Environment callbacks for system memory alloc/free. */
HGSMISIZE cbMaxBlock; /* Maximum block size, as passed to HGSMIMAInit. */
uint32_t cBlocks; /* How many blocks in the listBlocks. */
RTLISTANCHOR listBlocks; /* All memory blocks, sorted. */
RTLISTANCHOR aListFreeBlocks[HGSMI_MA_DESC_ORDER_MASK + 1]; /* One free list per block order. */
} HGSMIMADATA;
RT_C_DECLS_BEGIN
/* Initializes the allocator over pArea from an array of block descriptors. */
int HGSMIMAInit(HGSMIMADATA *pMA, const HGSMIAREA *pArea,
HGSMIOFFSET *paDescriptors, uint32_t cDescriptors, HGSMISIZE cbMaxBlock,
const HGSMIENV *pEnv);
/* Releases all allocator state created by HGSMIMAInit. */
void HGSMIMAUninit(HGSMIMADATA *pMA);
/* Allocates cb bytes from the managed area. */
void *HGSMIMAAlloc(HGSMIMADATA *pMA, HGSMISIZE cb);
/* Returns a block previously obtained from HGSMIMAAlloc. */
void HGSMIMAFree(HGSMIMADATA *pMA, void *pv);
/* Finds the block containing the given area offset. */
HGSMIMABLOCK *HGSMIMASearchOffset(HGSMIMADATA *pMA, HGSMIOFFSET off);
/* Returns the number of bits set in u32 (population count). */
uint32_t HGSMIPopCnt32(uint32_t u32);
/* Converts an allocator order to a block size in bytes. */
DECLINLINE(HGSMISIZE) HGSMIMAOrder2Size(HGSMIOFFSET order)
{
    /* Block size doubles with each order, starting at HGSMI_MA_BLOCK_SIZE_MIN. */
    const HGSMIOFFSET cShift = HGSMI_MA_DESC_ORDER_BASE + order;
    return (HGSMISIZE)(UINT32_C(1) << cShift);
}
/* Converts an exact power-of-two block size back to its allocator order. */
DECLINLINE(HGSMIOFFSET) HGSMIMASize2Order(HGSMISIZE cb)
{
/* For a power-of-two cb, popcount(cb - 1) == log2(cb); subtracting the
 * base order yields the allocator order. The assertion below verifies
 * that cb was indeed an exact order size. */
HGSMIOFFSET order = HGSMIPopCnt32(cb - 1) - HGSMI_MA_DESC_ORDER_BASE;
Assert(HGSMIMAOrder2Size(order) == cb);
return order;
}
RT_C_DECLS_END
#endif /* !___VBox_HGSMI_HGSMIMemAlloc_h */
/** @file
* VirtualBox graphics card port I/O definitions
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_Hardware_VBoxVideoVBE_h
#define ___VBox_Hardware_VBoxVideoVBE_h
/* GUEST <-> HOST Communication API */
/** @todo FIXME: Either dynamicly ask host for this or put somewhere high in
* physical memory like 0xE0000000. */
#define VBE_DISPI_BANK_ADDRESS 0xA0000
#define VBE_DISPI_BANK_SIZE_KB 64
#define VBE_DISPI_MAX_XRES 16384
#define VBE_DISPI_MAX_YRES 16384
#define VBE_DISPI_MAX_BPP 32
#define VBE_DISPI_IOPORT_INDEX 0x01CE
#define VBE_DISPI_IOPORT_DATA 0x01CF
#define VBE_DISPI_IOPORT_DAC_WRITE_INDEX 0x03C8
#define VBE_DISPI_IOPORT_DAC_DATA 0x03C9
#define VBE_DISPI_INDEX_ID 0x0
#define VBE_DISPI_INDEX_XRES 0x1
#define VBE_DISPI_INDEX_YRES 0x2
#define VBE_DISPI_INDEX_BPP 0x3
#define VBE_DISPI_INDEX_ENABLE 0x4
#define VBE_DISPI_INDEX_BANK 0x5
#define VBE_DISPI_INDEX_VIRT_WIDTH 0x6
#define VBE_DISPI_INDEX_VIRT_HEIGHT 0x7
#define VBE_DISPI_INDEX_X_OFFSET 0x8
#define VBE_DISPI_INDEX_Y_OFFSET 0x9
#define VBE_DISPI_INDEX_VBOX_VIDEO 0xa
#define VBE_DISPI_INDEX_FB_BASE_HI 0xb
#define VBE_DISPI_ID0 0xB0C0
#define VBE_DISPI_ID1 0xB0C1
#define VBE_DISPI_ID2 0xB0C2
#define VBE_DISPI_ID3 0xB0C3
#define VBE_DISPI_ID4 0xB0C4
#define VBE_DISPI_ID_VBOX_VIDEO 0xBE00
/* The VBOX interface id. Indicates support for VBVA shared memory interface. */
#define VBE_DISPI_ID_HGSMI 0xBE01
#define VBE_DISPI_ID_ANYX 0xBE02
#define VBE_DISPI_DISABLED 0x00
#define VBE_DISPI_ENABLED 0x01
#define VBE_DISPI_GETCAPS 0x02
#define VBE_DISPI_8BIT_DAC 0x20
/** @note this definition is a BOCHS legacy, used only in the video BIOS
* code and ignored by the emulated hardware. */
#define VBE_DISPI_LFB_ENABLED 0x40
#define VBE_DISPI_NOCLEARMEM 0x80
#define VGA_PORT_HGSMI_HOST 0x3b0
#define VGA_PORT_HGSMI_GUEST 0x3d0
/* this should be in sync with monitorCount <xsd:maxInclusive value="64"/> in src/VBox/Main/xml/VirtualBox-settings-common.xsd */
#define VBOX_VIDEO_MAX_SCREENS 64
#endif /* !___VBox_Hardware_VBoxVideoVBE_h */
......@@ -124,7 +124,7 @@ DECLVBGL(int) VbglInitClient(void);
/**
* The library termination function.
*/
DECLVBGL(void) VbglTerminate(void);
DECLVBGL(void) VbglTerminate (void);
/** @name Generic request functions.
......@@ -148,7 +148,7 @@ DECLVBGL(int) VbglGRAlloc(VMMDevRequestHeader **ppReq, size_t cbReq, VMMDevReque
*
* @return VBox status code.
*/
DECLVBGL(int) VbglGRPerform(VMMDevRequestHeader *pReq);
DECLVBGL(int) VbglGRPerform (VMMDevRequestHeader *pReq);
/**
* Free the generic request memory.
......@@ -157,7 +157,7 @@ DECLVBGL(int) VbglGRPerform(VMMDevRequestHeader *pReq);
*
* @return VBox status code.
*/
DECLVBGL(void) VbglGRFree(VMMDevRequestHeader *pReq);
DECLVBGL(void) VbglGRFree (VMMDevRequestHeader *pReq);
/**
* Verify the generic request header.
......@@ -169,7 +169,7 @@ DECLVBGL(void) VbglGRFree(VMMDevRequestHeader *pReq);
*
* @return VBox status code.
*/
DECLVBGL(int) VbglGRVerify(const VMMDevRequestHeader *pReq, size_t cbReq);
DECLVBGL(int) VbglGRVerify (const VMMDevRequestHeader *pReq, size_t cbReq);
/** @} */
# ifdef VBOX_WITH_HGCM
......@@ -204,8 +204,8 @@ typedef FNVBGLHGCMCALLBACK *PFNVBGLHGCMCALLBACK;
* @return VBox status code.
*/
DECLR0VBGL(int) VbglR0HGCMInternalConnect(VBoxGuestHGCMConnectInfo *pConnectInfo,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
DECLR0VBGL(int) VbglR0HGCMInternalConnect (VBoxGuestHGCMConnectInfo *pConnectInfo,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
/**
......@@ -225,8 +225,8 @@ DECLR0VBGL(int) VbglR0HGCMInternalConnect(VBoxGuestHGCMConnectInfo *pConnectInfo
* @return VBox status code.
*/
DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
DECLR0VBGL(int) VbglR0HGCMInternalDisconnect (VBoxGuestHGCMDisconnectInfo *pDisconnectInfo,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
/** Call a HGCM service.
*
......@@ -242,8 +242,8 @@ DECLR0VBGL(int) VbglR0HGCMInternalDisconnect(VBoxGuestHGCMDisconnectInfo *pDisco
*
* @return VBox status code.
*/
DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
DECLR0VBGL(int) VbglR0HGCMInternalCall (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
/** Call a HGCM service. (32 bits packet structure in a 64 bits guest)
*
......@@ -259,8 +259,8 @@ DECLR0VBGL(int) VbglR0HGCMInternalCall(VBoxGuestHGCMCallInfo *pCallInfo, uint32_
*
* @return VBox status code.
*/
DECLR0VBGL(int) VbglR0HGCMInternalCall32(VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
DECLR0VBGL(int) VbglR0HGCMInternalCall32 (VBoxGuestHGCMCallInfo *pCallInfo, uint32_t cbCallInfo, uint32_t fFlags,
PFNVBGLHGCMCALLBACK pfnAsyncCallback, void *pvAsyncData, uint32_t u32AsyncData);
/** @name VbglR0HGCMInternalCall flags
* @{ */
......@@ -295,7 +295,7 @@ typedef struct VBGLHGCMHANDLEDATA *VBGLHGCMHANDLE;
*
* @return VBox status code.
*/
DECLVBGL(int) VbglHGCMConnect(VBGLHGCMHANDLE *pHandle, VBoxGuestHGCMConnectInfo *pData);
DECLVBGL(int) VbglHGCMConnect (VBGLHGCMHANDLE *pHandle, VBoxGuestHGCMConnectInfo *pData);
/**
* Connect to a service.
......@@ -305,7 +305,7 @@ DECLVBGL(int) VbglHGCMConnect(VBGLHGCMHANDLE *pHandle, VBoxGuestHGCMConnectInfo
*
* @return VBox status code.
*/
DECLVBGL(int) VbglHGCMDisconnect(VBGLHGCMHANDLE handle, VBoxGuestHGCMDisconnectInfo *pData);
DECLVBGL(int) VbglHGCMDisconnect (VBGLHGCMHANDLE handle, VBoxGuestHGCMDisconnectInfo *pData);
/**
* Call to a service.
......@@ -316,7 +316,7 @@ DECLVBGL(int) VbglHGCMDisconnect(VBGLHGCMHANDLE handle, VBoxGuestHGCMDisconnectI
*
* @return VBox status code.
*/
DECLVBGL(int) VbglHGCMCall(VBGLHGCMHANDLE handle, VBoxGuestHGCMCallInfo *pData, uint32_t cbData);
DECLVBGL(int) VbglHGCMCall (VBGLHGCMHANDLE handle, VBoxGuestHGCMCallInfo *pData, uint32_t cbData);
/**
* Call to a service with user-mode data received by the calling driver from the User-Mode process.
......@@ -328,7 +328,7 @@ DECLVBGL(int) VbglHGCMCall(VBGLHGCMHANDLE handle, VBoxGuestHGCMCallInfo *pData,
*
* @return VBox status code.
*/
DECLVBGL(int) VbglHGCMCallUserData(VBGLHGCMHANDLE handle, VBoxGuestHGCMCallInfo *pData, uint32_t cbData);
DECLVBGL(int) VbglHGCMCallUserData (VBGLHGCMHANDLE handle, VBoxGuestHGCMCallInfo *pData, uint32_t cbData);
/**
* Call to a service with timeout.
......@@ -364,20 +364,20 @@ DECLVBGL(int) VbglR0CrCtlConCallUserData(VBGLCRCTLHANDLE hCtl, struct VBoxGuestH
*
* @returns VBox status code.
*/
DECLVBGL(int) VbglPhysHeapInit(void);
DECLVBGL(int) VbglPhysHeapInit (void);
/**
* Shutdown the heap.
*/
DECLVBGL(void) VbglPhysHeapTerminate(void);
DECLVBGL(void) VbglPhysHeapTerminate (void);
/**
* Allocate a memory block.
*
* @returns Virtual address of the allocated memory block.
* @param cbSize Size of block to be allocated.
* @param cbSize Size of block to be allocated.
*/
DECLVBGL(void *) VbglPhysHeapAlloc(uint32_t cbSize);
DECLVBGL(void *) VbglPhysHeapAlloc (uint32_t cbSize);
/**
* Get physical address of memory block pointed by the virtual address.
......@@ -400,15 +400,15 @@ DECLVBGL(uint32_t) VbglPhysHeapGetPhysAddr(void *pv);
*/
DECLVBGL(void) VbglPhysHeapFree(void *pv);
DECLVBGL(int) VbglQueryVMMDevMemory(VMMDevMemory **ppVMMDevMemory);
DECLR0VBGL(bool) VbglR0CanUsePhysPageList(void);
DECLVBGL(int) VbglQueryVMMDevMemory (VMMDevMemory **ppVMMDevMemory);
DECLR0VBGL(bool) VbglR0CanUsePhysPageList(void);
# ifndef VBOX_GUEST
/** @name Mouse
* @{ */
DECLVBGL(int) VbglSetMouseNotifyCallback(PFNVBOXGUESTMOUSENOTIFY pfnNotify, void *pvUser);
DECLVBGL(int) VbglGetMouseStatus(uint32_t *pfFeatures, uint32_t *px, uint32_t *py);
DECLVBGL(int) VbglSetMouseStatus(uint32_t fFeatures);
DECLVBGL(int) VbglSetMouseNotifyCallback(PFNVBOXGUESTMOUSENOTIFY pfnNotify, void *pvUser);
DECLVBGL(int) VbglGetMouseStatus(uint32_t *pfFeatures, uint32_t *px, uint32_t *py);
DECLVBGL(int) VbglSetMouseStatus(uint32_t fFeatures);
/** @} */
# endif /* VBOX_GUEST */
......@@ -491,7 +491,7 @@ VBGLR3DECL(int) VbglR3SetPointerShapeReq(struct VMMDevReqMousePointer *pReq)
* @{ */
/** The folder for the video mode hint unix domain socket on Unix-like guests.
* @note This can be safely changed as all users are rebuilt in lock-step. */
#define VBGLR3HOSTDISPSOCKETPATH "/tmp/.VBoxService"
#define VBGLR3HOSTDISPSOCKETPATH "/tmp/.VBoxService"
/** The path to the video mode hint unix domain socket on Unix-like guests. */
#define VBGLR3HOSTDISPSOCKET VBGLR3VIDEOMODEHINTSOCKETPATH "/VideoModeHint"
......@@ -757,12 +757,16 @@ typedef struct VBGLR3GUESTDNDCMDCTX
* a second communication channel, e.g. via TCP/IP.
* Use a union for the HGCM stuff then. */
/** IN: HGCM client ID to use for communication. */
/** HGCM client ID to use for communication. */
uint32_t uClientID;
/** IN: Protocol version to use. */
/** The VM's current session ID. */
uint64_t uSessionID;
/** Protocol version to use. */
uint32_t uProtocol;
/** OUT: Number of parameters retrieved. */
/** Number of parameters retrieved for the current command. */
uint32_t uNumParms;
/** Max chunk size (in bytes) for data transfers. */
uint32_t cbMaxChunkSize;
} VBGLR3GUESTDNDCMDCTX, *PVBGLR3GUESTDNDCMDCTX;
typedef struct VBGLR3DNDHGCMEVENT
......@@ -770,7 +774,7 @@ typedef struct VBGLR3DNDHGCMEVENT
uint32_t uType; /** The event type this struct contains. */
uint32_t uScreenId; /** Screen ID this request belongs to. */
char *pszFormats; /** Format list (\r\n separated). */
uint32_t cbFormats; /** Size of pszFormats (\0 included). */
uint32_t cbFormats; /** Size (in bytes) of pszFormats (\0 included). */
union
{
struct
......@@ -783,7 +787,7 @@ typedef struct VBGLR3DNDHGCMEVENT
struct
{
void *pvData; /** Data request. */
size_t cbData; /** Size of pvData. */
uint32_t cbData; /** Size (in bytes) of pvData. */
} b; /** Values used in drop data event type. */
} u;
} VBGLR3DNDHGCMEVENT;
......@@ -792,14 +796,13 @@ typedef const PVBGLR3DNDHGCMEVENT CPVBGLR3DNDHGCMEVENT;
VBGLR3DECL(int) VbglR3DnDConnect(PVBGLR3GUESTDNDCMDCTX pCtx);
VBGLR3DECL(int) VbglR3DnDDisconnect(PVBGLR3GUESTDNDCMDCTX pCtx);
VBGLR3DECL(int) VbglR3DnDProcessNextMessage(PVBGLR3GUESTDNDCMDCTX pCtx, CPVBGLR3DNDHGCMEVENT pEvent);
VBGLR3DECL(int) VbglR3DnDRecvNextMsg(PVBGLR3GUESTDNDCMDCTX pCtx, CPVBGLR3DNDHGCMEVENT pEvent);
VBGLR3DECL(int) VbglR3DnDHGAcknowledgeOperation(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uAction);
VBGLR3DECL(int) VbglR3DnDHGRequestData(PVBGLR3GUESTDNDCMDCTX pCtx, const char *pszFormat);
VBGLR3DECL(int) VbglR3DnDHGSetProgress(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uStatus, uint8_t uPercent, int rcErr);
VBGLR3DECL(int) VbglR3DnDHGSendAckOp(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uAction);
VBGLR3DECL(int) VbglR3DnDHGSendReqData(PVBGLR3GUESTDNDCMDCTX pCtx, const char *pcszFormat);
VBGLR3DECL(int) VbglR3DnDHGSendProgress(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uStatus, uint8_t uPercent, int rcErr);
# ifdef VBOX_WITH_DRAG_AND_DROP_GH
VBGLR3DECL(int) VbglR3DnDGHAcknowledgePending(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uDefAction,
uint32_t uAllActions, const char *pszFormats);
VBGLR3DECL(int) VbglR3DnDGHSendAckPending(PVBGLR3GUESTDNDCMDCTX pCtx, uint32_t uDefAction, uint32_t uAllActions, const char* pcszFormats, uint32_t cbFormats);
VBGLR3DECL(int) VbglR3DnDGHSendData(PVBGLR3GUESTDNDCMDCTX pCtx, const char *pszFormat, void *pvData, uint32_t cbData);
VBGLR3DECL(int) VbglR3DnDGHSendError(PVBGLR3GUESTDNDCMDCTX pCtx, int rcOp);
# endif /* VBOX_WITH_DRAG_AND_DROP_GH */
......@@ -838,6 +841,12 @@ VBGLR3DECL(int) VbglR3WriteVideoMode(unsigned cDisplay, unsigned cx,
unsigned y, unsigned fEnabled);
/** @} */
/** @name Generic HGCM
* @{ */
VBGLR3DECL(int) VbglR3HGCMConnect(const char *pszServiceName, HGCMCLIENTID *pidClient);
VBGLR3DECL(int) VbglR3HGCMDisconnect(HGCMCLIENTID idClient);
/** @} */
#endif /* IN_RING3 */
/** @} */
......
/** @file
* VirtualBox Video interface.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___VBox_VBoxVideo_h
#define ___VBox_VBoxVideo_h
#include <VBox/VMMDev.h>
#include <VBox/Hardware/VBoxVideoVBE.h>
#include <iprt/cdefs.h>
#include <iprt/types.h>
/*
* The last 4096 bytes of the guest VRAM contains the generic info for all
* DualView chunks: sizes and offsets of chunks. This is filled by miniport.
*
* Last 4096 bytes of each chunk contain chunk specific data: framebuffer info,
* etc. This is used exclusively by the corresponding instance of a display driver.
*
* The VRAM layout:
* Last 4096 bytes - Adapter information area.
 * 4096 bytes aligned miniport heap (value specified in the config rounded up).
 * Slack - what is left after dividing the VRAM.
* 4096 bytes aligned framebuffers:
* last 4096 bytes of each framebuffer is the display information area.
*
* The Virtual Graphics Adapter information in the guest VRAM is stored by the
* guest video driver using structures prepended by VBOXVIDEOINFOHDR.
*
* When the guest driver writes dword 0 to the VBE_DISPI_INDEX_VBOX_VIDEO
* the host starts to process the info. The first element at the start of
 * the 4096 bytes region should normally be a LINK that points to
* actual information chain. That way the guest driver can have some
* fixed layout of the information memory block and just rewrite
* the link to point to relevant memory chain.
*
* The processing stops at the END element.
*
* The host can access the memory only when the port IO is processed.
* All data that will be needed later must be copied from these 4096 bytes.
* But other VRAM can be used by host until the mode is disabled.
*
* The guest driver writes dword 0xffffffff to the VBE_DISPI_INDEX_VBOX_VIDEO
* to disable the mode.
*
* VBE_DISPI_INDEX_VBOX_VIDEO is used to read the configuration information
* from the host and issue commands to the host.
*
 * The guest writes the VBE_DISPI_INDEX_VBOX_VIDEO index register, then the
* following operations with the VBE data register can be performed:
*
* Operation Result
* write 16 bit value NOP
* read 16 bit value count of monitors
* write 32 bit value sets the vbox command value and the command processed by the host
* read 32 bit value result of the last vbox command is returned
*/
/* Screen index of the primary (first) guest screen. */
#define VBOX_VIDEO_PRIMARY_SCREEN 0
/* Sentinel screen index meaning "no screen". Parenthesized so the unary ~
 * cannot bind to neighbouring operators after macro expansion
 * (e.g. in expressions like x & VBOX_VIDEO_NO_SCREEN). Value unchanged. */
#define VBOX_VIDEO_NO_SCREEN (~0)
/* The size of the information. */
/*
* The minimum HGSMI heap size is PAGE_SIZE (4096 bytes) and is a restriction of the
* runtime heapsimple API. Use minimum 2 pages here, because the info area also may
* contain other data (for example HGSMIHOSTFLAGS structure).
*/
#ifndef VBOX_XPDM_MINIPORT
# define VBVA_ADAPTER_INFORMATION_SIZE (64*_1K)
#else
#define VBVA_ADAPTER_INFORMATION_SIZE (16*_1K)
#define VBVA_DISPLAY_INFORMATION_SIZE (64*_1K)
#endif
#define VBVA_MIN_BUFFER_SIZE (64*_1K)
/* The value for port IO to let the adapter to interpret the adapter memory. */
#define VBOX_VIDEO_DISABLE_ADAPTER_MEMORY 0xFFFFFFFF
/* The value for port IO to let the adapter to interpret the adapter memory. */
#define VBOX_VIDEO_INTERPRET_ADAPTER_MEMORY 0x00000000
/* The value for port IO to let the adapter to interpret the display memory.
* The display number is encoded in low 16 bits.
*/
#define VBOX_VIDEO_INTERPRET_DISPLAY_MEMORY_BASE 0x00010000
/* The end of the information. */
#define VBOX_VIDEO_INFO_TYPE_END 0
/* Instructs the host to fetch the next VBOXVIDEOINFOHDR at the given offset of VRAM. */
#define VBOX_VIDEO_INFO_TYPE_LINK 1
/* Information about a display memory position. */
#define VBOX_VIDEO_INFO_TYPE_DISPLAY 2
/* Information about a screen. */
#define VBOX_VIDEO_INFO_TYPE_SCREEN 3
/* Information about host notifications for the driver. */
#define VBOX_VIDEO_INFO_TYPE_HOST_EVENTS 4
/* Information about non-volatile guest VRAM heap. */
#define VBOX_VIDEO_INFO_TYPE_NV_HEAP 5
/* VBVA enable/disable. */
#define VBOX_VIDEO_INFO_TYPE_VBVA_STATUS 6
/* VBVA flush. */
#define VBOX_VIDEO_INFO_TYPE_VBVA_FLUSH 7
/* Query configuration value. */
#define VBOX_VIDEO_INFO_TYPE_QUERY_CONF32 8
#pragma pack(1)
/* Header prepended to every information block the guest driver places in the
 * adapter/display information area of VRAM (see the layout comment above).
 * The structure is byte-packed (#pragma pack(1) is in effect). */
typedef struct VBOXVIDEOINFOHDR
{
/* One of the VBOX_VIDEO_INFO_TYPE_* values describing the payload. */
uint8_t u8Type;
/* Unused padding byte. */
uint8_t u8Reserved;
/* Presumably the size in bytes of the payload following this header —
 * TODO(review): confirm against the host-side parser. */
uint16_t u16Length;
} VBOXVIDEOINFOHDR;
/* Payload of a VBOX_VIDEO_INFO_TYPE_LINK block: instructs the host to fetch
 * the next VBOXVIDEOINFOHDR at the given offset of VRAM. */
typedef struct VBOXVIDEOINFOLINK
{
/* Relative offset in VRAM */
int32_t i32Offset;
} VBOXVIDEOINFOLINK;
/* Resides in adapter info memory. Describes a display VRAM chunk. */
typedef struct VBOXVIDEOINFODISPLAY
{
/* Index of the framebuffer assigned by guest. */
uint32_t u32Index;
/* Absolute offset in VRAM of the framebuffer to be displayed on the monitor. */
uint32_t u32Offset;
/* The size of the memory that can be used for the screen. */
uint32_t u32FramebufferSize;
/* The size of the memory that is used for the Display information.
* The information is at u32Offset + u32FramebufferSize
*/
uint32_t u32InformationSize;
} VBOXVIDEOINFODISPLAY;
/* Resides in display info area, describes the current video mode. */
#define VBOX_VIDEO_INFO_SCREEN_F_NONE 0x00
#define VBOX_VIDEO_INFO_SCREEN_F_ACTIVE 0x01
typedef struct VBOXVIDEOINFOSCREEN
{
/* Physical X origin relative to the primary screen. */
int32_t xOrigin;
/* Physical Y origin relative to the primary screen. */
int32_t yOrigin;
/* The scan line size in bytes. */
uint32_t u32LineSize;
/* Width of the screen. */
uint16_t u16Width;
/* Height of the screen. */
uint16_t u16Height;
/* Color depth. */
uint8_t bitsPerPixel;
/* VBOX_VIDEO_INFO_SCREEN_F_* */
uint8_t u8Flags;
} VBOXVIDEOINFOSCREEN;
/* The guest initializes the structure to 0. The positions of the structure in the
* display info area must not be changed, host will update the structure. Guest checks
* the events and modifies the structure as a response to host.
*/
#define VBOX_VIDEO_INFO_HOST_EVENTS_F_NONE 0x00000000
#define VBOX_VIDEO_INFO_HOST_EVENTS_F_VRDP_RESET 0x00000080
typedef struct VBOXVIDEOINFOHOSTEVENTS
{
/* Host events. */
uint32_t fu32Events;
} VBOXVIDEOINFOHOSTEVENTS;
/* Resides in adapter info memory. Describes the non-volatile VRAM heap. */
typedef struct VBOXVIDEOINFONVHEAP
{
/* Absolute offset in VRAM of the start of the heap. */
uint32_t u32HeapOffset;
/* The size of the heap. */
uint32_t u32HeapSize;
} VBOXVIDEOINFONVHEAP;
/* Display information area. */
typedef struct VBOXVIDEOINFOVBVASTATUS
{
/* Absolute offset in VRAM of the start of the VBVA QUEUE. 0 to disable VBVA. */
uint32_t u32QueueOffset;
/* The size of the VBVA QUEUE. 0 to disable VBVA. */
uint32_t u32QueueSize;
} VBOXVIDEOINFOVBVASTATUS;
typedef struct VBOXVIDEOINFOVBVAFLUSH
{
uint32_t u32DataStart;
uint32_t u32DataEnd;
} VBOXVIDEOINFOVBVAFLUSH;
#define VBOX_VIDEO_QCI32_MONITOR_COUNT 0
#define VBOX_VIDEO_QCI32_OFFSCREEN_HEAP_SIZE 1
typedef struct VBOXVIDEOINFOQUERYCONF32
{
uint32_t u32Index;
uint32_t u32Value;
} VBOXVIDEOINFOQUERYCONF32;
#pragma pack()
#ifdef VBOX_WITH_VIDEOHWACCEL
#pragma pack(1)
#define VBOXVHWA_VERSION_MAJ 0
#define VBOXVHWA_VERSION_MIN 0
#define VBOXVHWA_VERSION_BLD 6
#define VBOXVHWA_VERSION_RSV 0
typedef enum
{
VBOXVHWACMD_TYPE_SURF_CANCREATE = 1,
VBOXVHWACMD_TYPE_SURF_CREATE,
VBOXVHWACMD_TYPE_SURF_DESTROY,
VBOXVHWACMD_TYPE_SURF_LOCK,
VBOXVHWACMD_TYPE_SURF_UNLOCK,
VBOXVHWACMD_TYPE_SURF_BLT,
VBOXVHWACMD_TYPE_SURF_FLIP,
VBOXVHWACMD_TYPE_SURF_OVERLAY_UPDATE,
VBOXVHWACMD_TYPE_SURF_OVERLAY_SETPOSITION,
VBOXVHWACMD_TYPE_SURF_COLORKEY_SET,
VBOXVHWACMD_TYPE_QUERY_INFO1,
VBOXVHWACMD_TYPE_QUERY_INFO2,
VBOXVHWACMD_TYPE_ENABLE,
VBOXVHWACMD_TYPE_DISABLE,
VBOXVHWACMD_TYPE_HH_CONSTRUCT,
VBOXVHWACMD_TYPE_HH_RESET
#ifdef VBOX_WITH_WDDM
, VBOXVHWACMD_TYPE_SURF_GETINFO
, VBOXVHWACMD_TYPE_SURF_COLORFILL
#endif
, VBOXVHWACMD_TYPE_HH_DISABLE
, VBOXVHWACMD_TYPE_HH_ENABLE
, VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEBEGIN
, VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEEND
, VBOXVHWACMD_TYPE_HH_SAVESTATE_SAVEPERFORM
, VBOXVHWACMD_TYPE_HH_SAVESTATE_LOADPERFORM
} VBOXVHWACMD_TYPE;
/* the command processing was asynch, set by the host to indicate asynch command completion
* must not be cleared once set, the command completion is performed by issuing a host->guest completion command
* while keeping this flag unchanged */
#define VBOXVHWACMD_FLAG_HG_ASYNCH 0x00010000
/* asynch completion is performed by issuing the event */
#define VBOXVHWACMD_FLAG_GH_ASYNCH_EVENT 0x00000001
/* issue interrupt on asynch completion */
#define VBOXVHWACMD_FLAG_GH_ASYNCH_IRQ 0x00000002
/* guest does not do any op on completion of this command, the host may copy the command and indicate that it does not need the command anymore
* by setting the VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED flag */
#define VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004
/* the host has copied the VBOXVHWACMD_FLAG_GH_ASYNCH_NOCOMPLETION command and returned it to the guest */
#define VBOXVHWACMD_FLAG_HG_ASYNCH_RETURNED 0x00020000
/* this is the host->host cmd, i.e. a configuration command posted by the host to the framebuffer */
#define VBOXVHWACMD_FLAG_HH_CMD 0x10000000
/* Common header for every video-HW-acceleration (VHWA) command exchanged
 * between guest and host. The command-specific payload starts at 'body'
 * (accessed via VBOXVHWACMD_BODY / sized via VBOXVHWACMD_SIZE* below);
 * byte-packed per the surrounding #pragma pack(1). */
typedef struct VBOXVHWACMD
{
VBOXVHWACMD_TYPE enmCmd; /* command type */
volatile int32_t rc; /* command result */
int32_t iDisplay; /* display index */
volatile int32_t Flags; /* ored VBOXVHWACMD_FLAG_xxx values */
uint64_t GuestVBVAReserved1; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
uint64_t GuestVBVAReserved2; /* field internally used by the guest VBVA cmd handling, must NOT be modified by clients */
/* Presumably a reference count on the command object — TODO(review): confirm
 * the owner and the retire condition with the command dispatch code. */
volatile uint32_t cRefs;
int32_t Reserved;
union
{
struct VBOXVHWACMD *pNext;
uint32_t offNext;
uint64_t Data; /* the body is 64-bit aligned */
} u;
/* Variable-length payload; declared as [1] (pre-C99 idiom), the real size is
 * VBOXVHWACMD_SIZE_FROMBODYSIZE(cbBody). */
char body[1];
} VBOXVHWACMD;
/* Size of the fixed VBOXVHWACMD header, i.e. everything before 'body'. */
#define VBOXVHWACMD_HEADSIZE() (RT_OFFSETOF(VBOXVHWACMD, body))
/* Total command size for a payload of _s bytes. */
#define VBOXVHWACMD_SIZE_FROMBODYSIZE(_s) (VBOXVHWACMD_HEADSIZE() + (_s))
/* Total command size when the payload is a structure of type _tCmd. */
#define VBOXVHWACMD_SIZE(_tCmd) (VBOXVHWACMD_SIZE_FROMBODYSIZE(sizeof(_tCmd)))
typedef unsigned int VBOXVHWACMD_LENGTH;
typedef uint64_t VBOXVHWA_SURFHANDLE;
#define VBOXVHWA_SURFHANDLE_INVALID 0ULL
#define VBOXVHWACMD_BODY(_p, _t) ((_t*)(_p)->body)
#define VBOXVHWACMD_HEAD(_pb) ((VBOXVHWACMD*)((uint8_t *)(_pb) - RT_OFFSETOF(VBOXVHWACMD, body)))
typedef struct VBOXVHWA_RECTL
{
int32_t left;
int32_t top;
int32_t right;
int32_t bottom;
} VBOXVHWA_RECTL;
typedef struct VBOXVHWA_COLORKEY
{
uint32_t low;
uint32_t high;
} VBOXVHWA_COLORKEY;
typedef struct VBOXVHWA_PIXELFORMAT
{
uint32_t flags;
uint32_t fourCC;
union
{
uint32_t rgbBitCount;
uint32_t yuvBitCount;
} c;
union
{
uint32_t rgbRBitMask;
uint32_t yuvYBitMask;
} m1;
union
{
uint32_t rgbGBitMask;
uint32_t yuvUBitMask;
} m2;
union
{
uint32_t rgbBBitMask;
uint32_t yuvVBitMask;
} m3;
union
{
uint32_t rgbABitMask;
} m4;
uint32_t Reserved;
} VBOXVHWA_PIXELFORMAT;
typedef struct VBOXVHWA_SURFACEDESC
{
uint32_t flags;
uint32_t height;
uint32_t width;
uint32_t pitch;
uint32_t sizeX;
uint32_t sizeY;
uint32_t cBackBuffers;
uint32_t Reserved;
VBOXVHWA_COLORKEY DstOverlayCK;
VBOXVHWA_COLORKEY DstBltCK;
VBOXVHWA_COLORKEY SrcOverlayCK;
VBOXVHWA_COLORKEY SrcBltCK;
VBOXVHWA_PIXELFORMAT PixelFormat;
uint32_t surfCaps;
uint32_t Reserved2;
VBOXVHWA_SURFHANDLE hSurf;
uint64_t offSurface;
} VBOXVHWA_SURFACEDESC;
typedef struct VBOXVHWA_BLTFX
{
uint32_t flags;
uint32_t rop;
uint32_t rotationOp;
uint32_t rotation;
uint32_t fillColor;
uint32_t Reserved;
VBOXVHWA_COLORKEY DstCK;
VBOXVHWA_COLORKEY SrcCK;
} VBOXVHWA_BLTFX;
typedef struct VBOXVHWA_OVERLAYFX
{
uint32_t flags;
uint32_t Reserved1;
uint32_t fxFlags;
uint32_t Reserved2;
VBOXVHWA_COLORKEY DstCK;
VBOXVHWA_COLORKEY SrcCK;
} VBOXVHWA_OVERLAYFX;
#define VBOXVHWA_CAPS_BLT 0x00000040
#define VBOXVHWA_CAPS_BLTCOLORFILL 0x04000000
#define VBOXVHWA_CAPS_BLTFOURCC 0x00000100
#define VBOXVHWA_CAPS_BLTSTRETCH 0x00000200
#define VBOXVHWA_CAPS_BLTQUEUE 0x00000080
#define VBOXVHWA_CAPS_OVERLAY 0x00000800
#define VBOXVHWA_CAPS_OVERLAYFOURCC 0x00002000
#define VBOXVHWA_CAPS_OVERLAYSTRETCH 0x00004000
#define VBOXVHWA_CAPS_OVERLAYCANTCLIP 0x00001000
#define VBOXVHWA_CAPS_COLORKEY 0x00400000
#define VBOXVHWA_CAPS_COLORKEYHWASSIST 0x01000000
#define VBOXVHWA_SCAPS_BACKBUFFER 0x00000004
#define VBOXVHWA_SCAPS_COMPLEX 0x00000008
#define VBOXVHWA_SCAPS_FLIP 0x00000010
#define VBOXVHWA_SCAPS_FRONTBUFFER 0x00000020
#define VBOXVHWA_SCAPS_OFFSCREENPLAIN 0x00000040
#define VBOXVHWA_SCAPS_OVERLAY 0x00000080
#define VBOXVHWA_SCAPS_PRIMARYSURFACE 0x00000200
#define VBOXVHWA_SCAPS_SYSTEMMEMORY 0x00000800
#define VBOXVHWA_SCAPS_VIDEOMEMORY 0x00004000
#define VBOXVHWA_SCAPS_VISIBLE 0x00008000
#define VBOXVHWA_SCAPS_LOCALVIDMEM 0x10000000
#define VBOXVHWA_PF_PALETTEINDEXED8 0x00000020
#define VBOXVHWA_PF_RGB 0x00000040
#define VBOXVHWA_PF_RGBTOYUV 0x00000100
#define VBOXVHWA_PF_YUV 0x00000200
#define VBOXVHWA_PF_FOURCC 0x00000004
#define VBOXVHWA_LOCK_DISCARDCONTENTS 0x00002000
#define VBOXVHWA_CFG_ENABLED 0x00000001
#define VBOXVHWA_SD_BACKBUFFERCOUNT 0x00000020
#define VBOXVHWA_SD_CAPS 0x00000001
#define VBOXVHWA_SD_CKDESTBLT 0x00004000
#define VBOXVHWA_SD_CKDESTOVERLAY 0x00002000
#define VBOXVHWA_SD_CKSRCBLT 0x00010000
#define VBOXVHWA_SD_CKSRCOVERLAY 0x00008000
#define VBOXVHWA_SD_HEIGHT 0x00000002
#define VBOXVHWA_SD_PITCH 0x00000008
#define VBOXVHWA_SD_PIXELFORMAT 0x00001000
/*#define VBOXVHWA_SD_REFRESHRATE 0x00040000*/
#define VBOXVHWA_SD_WIDTH 0x00000004
#define VBOXVHWA_CKEYCAPS_DESTBLT 0x00000001
#define VBOXVHWA_CKEYCAPS_DESTBLTCLRSPACE 0x00000002
#define VBOXVHWA_CKEYCAPS_DESTBLTCLRSPACEYUV 0x00000004
#define VBOXVHWA_CKEYCAPS_DESTBLTYUV 0x00000008
#define VBOXVHWA_CKEYCAPS_DESTOVERLAY 0x00000010
#define VBOXVHWA_CKEYCAPS_DESTOVERLAYCLRSPACE 0x00000020
#define VBOXVHWA_CKEYCAPS_DESTOVERLAYCLRSPACEYUV 0x00000040
#define VBOXVHWA_CKEYCAPS_DESTOVERLAYONEACTIVE 0x00000080
#define VBOXVHWA_CKEYCAPS_DESTOVERLAYYUV 0x00000100
#define VBOXVHWA_CKEYCAPS_SRCBLT 0x00000200
#define VBOXVHWA_CKEYCAPS_SRCBLTCLRSPACE 0x00000400
#define VBOXVHWA_CKEYCAPS_SRCBLTCLRSPACEYUV 0x00000800
#define VBOXVHWA_CKEYCAPS_SRCBLTYUV 0x00001000
#define VBOXVHWA_CKEYCAPS_SRCOVERLAY 0x00002000
#define VBOXVHWA_CKEYCAPS_SRCOVERLAYCLRSPACE 0x00004000
#define VBOXVHWA_CKEYCAPS_SRCOVERLAYCLRSPACEYUV 0x00008000
#define VBOXVHWA_CKEYCAPS_SRCOVERLAYONEACTIVE 0x00010000
#define VBOXVHWA_CKEYCAPS_SRCOVERLAYYUV 0x00020000
#define VBOXVHWA_CKEYCAPS_NOCOSTOVERLAY 0x00040000
#define VBOXVHWA_BLT_COLORFILL 0x00000400
#define VBOXVHWA_BLT_DDFX 0x00000800
#define VBOXVHWA_BLT_EXTENDED_FLAGS 0x40000000
#define VBOXVHWA_BLT_EXTENDED_LINEAR_CONTENT 0x00000004
#define VBOXVHWA_BLT_EXTENDED_PRESENTATION_STRETCHFACTOR 0x00000010
#define VBOXVHWA_BLT_KEYDESTOVERRIDE 0x00004000
#define VBOXVHWA_BLT_KEYSRCOVERRIDE 0x00010000
#define VBOXVHWA_BLT_LAST_PRESENTATION 0x20000000
#define VBOXVHWA_BLT_PRESENTATION 0x10000000
#define VBOXVHWA_BLT_ROP 0x00020000
#define VBOXVHWA_OVER_DDFX 0x00080000
#define VBOXVHWA_OVER_HIDE 0x00000200
#define VBOXVHWA_OVER_KEYDEST 0x00000400
#define VBOXVHWA_OVER_KEYDESTOVERRIDE 0x00000800
#define VBOXVHWA_OVER_KEYSRC 0x00001000
#define VBOXVHWA_OVER_KEYSRCOVERRIDE 0x00002000
#define VBOXVHWA_OVER_SHOW 0x00004000
#define VBOXVHWA_CKEY_COLORSPACE 0x00000001
#define VBOXVHWA_CKEY_DESTBLT 0x00000002
#define VBOXVHWA_CKEY_DESTOVERLAY 0x00000004
#define VBOXVHWA_CKEY_SRCBLT 0x00000008
#define VBOXVHWA_CKEY_SRCOVERLAY 0x00000010
#define VBOXVHWA_BLT_ARITHSTRETCHY 0x00000001
#define VBOXVHWA_BLT_MIRRORLEFTRIGHT 0x00000002
#define VBOXVHWA_BLT_MIRRORUPDOWN 0x00000004
#define VBOXVHWA_OVERFX_ARITHSTRETCHY 0x00000001
#define VBOXVHWA_OVERFX_MIRRORLEFTRIGHT 0x00000002
#define VBOXVHWA_OVERFX_MIRRORUPDOWN 0x00000004
#define VBOXVHWA_CAPS2_CANRENDERWINDOWED 0x00080000
#define VBOXVHWA_CAPS2_WIDESURFACES 0x00001000
#define VBOXVHWA_CAPS2_COPYFOURCC 0x00008000
/*#define VBOXVHWA_CAPS2_FLIPINTERVAL 0x00200000*/
/*#define VBOXVHWA_CAPS2_FLIPNOVSYNC 0x00400000*/
#define VBOXVHWA_OFFSET64_VOID (UINT64_MAX)
typedef struct VBOXVHWA_VERSION
{
uint32_t maj;
uint32_t min;
uint32_t bld;
uint32_t reserved;
} VBOXVHWA_VERSION;
#define VBOXVHWA_VERSION_INIT(_pv) do { \
(_pv)->maj = VBOXVHWA_VERSION_MAJ; \
(_pv)->min = VBOXVHWA_VERSION_MIN; \
(_pv)->bld = VBOXVHWA_VERSION_BLD; \
(_pv)->reserved = VBOXVHWA_VERSION_RSV; \
} while(0)
typedef struct VBOXVHWACMD_QUERYINFO1
{
union
{
struct
{
VBOXVHWA_VERSION guestVersion;
} in;
struct
{
uint32_t cfgFlags;
uint32_t caps;
uint32_t caps2;
uint32_t colorKeyCaps;
uint32_t stretchCaps;
uint32_t surfaceCaps;
uint32_t numOverlays;
uint32_t curOverlays;
uint32_t numFourCC;
uint32_t reserved;
} out;
} u;
} VBOXVHWACMD_QUERYINFO1;
typedef struct VBOXVHWACMD_QUERYINFO2
{
uint32_t numFourCC;
uint32_t FourCC[1];
} VBOXVHWACMD_QUERYINFO2;
#define VBOXVHWAINFO2_SIZE(_cFourCC) RT_OFFSETOF(VBOXVHWACMD_QUERYINFO2, FourCC[_cFourCC])
typedef struct VBOXVHWACMD_SURF_CANCREATE
{
VBOXVHWA_SURFACEDESC SurfInfo;
union
{
struct
{
uint32_t bIsDifferentPixelFormat;
uint32_t Reserved;
} in;
struct
{
int32_t ErrInfo;
} out;
} u;
} VBOXVHWACMD_SURF_CANCREATE;
typedef struct VBOXVHWACMD_SURF_CREATE
{
VBOXVHWA_SURFACEDESC SurfInfo;
} VBOXVHWACMD_SURF_CREATE;
#ifdef VBOX_WITH_WDDM
typedef struct VBOXVHWACMD_SURF_GETINFO
{
VBOXVHWA_SURFACEDESC SurfInfo;
} VBOXVHWACMD_SURF_GETINFO;
#endif
typedef struct VBOXVHWACMD_SURF_DESTROY
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hSurf;
} in;
} u;
} VBOXVHWACMD_SURF_DESTROY;
typedef struct VBOXVHWACMD_SURF_LOCK
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hSurf;
uint64_t offSurface;
uint32_t flags;
uint32_t rectValid;
VBOXVHWA_RECTL rect;
} in;
} u;
} VBOXVHWACMD_SURF_LOCK;
typedef struct VBOXVHWACMD_SURF_UNLOCK
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hSurf;
uint32_t xUpdatedMemValid;
uint32_t reserved;
VBOXVHWA_RECTL xUpdatedMemRect;
} in;
} u;
} VBOXVHWACMD_SURF_UNLOCK;
typedef struct VBOXVHWACMD_SURF_BLT
{
uint64_t DstGuestSurfInfo;
uint64_t SrcGuestSurfInfo;
union
{
struct
{
VBOXVHWA_SURFHANDLE hDstSurf;
uint64_t offDstSurface;
VBOXVHWA_RECTL dstRect;
VBOXVHWA_SURFHANDLE hSrcSurf;
uint64_t offSrcSurface;
VBOXVHWA_RECTL srcRect;
uint32_t flags;
uint32_t xUpdatedSrcMemValid;
VBOXVHWA_BLTFX desc;
VBOXVHWA_RECTL xUpdatedSrcMemRect;
} in;
} u;
} VBOXVHWACMD_SURF_BLT;
#ifdef VBOX_WITH_WDDM
typedef struct VBOXVHWACMD_SURF_COLORFILL
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hSurf;
uint64_t offSurface;
uint32_t u32Reserved;
uint32_t cRects;
VBOXVHWA_RECTL aRects[1];
} in;
} u;
} VBOXVHWACMD_SURF_COLORFILL;
#endif
typedef struct VBOXVHWACMD_SURF_FLIP
{
uint64_t TargGuestSurfInfo;
uint64_t CurrGuestSurfInfo;
union
{
struct
{
VBOXVHWA_SURFHANDLE hTargSurf;
uint64_t offTargSurface;
VBOXVHWA_SURFHANDLE hCurrSurf;
uint64_t offCurrSurface;
uint32_t flags;
uint32_t xUpdatedTargMemValid;
VBOXVHWA_RECTL xUpdatedTargMemRect;
} in;
} u;
} VBOXVHWACMD_SURF_FLIP;
typedef struct VBOXVHWACMD_SURF_COLORKEY_SET
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hSurf;
uint64_t offSurface;
VBOXVHWA_COLORKEY CKey;
uint32_t flags;
uint32_t reserved;
} in;
} u;
} VBOXVHWACMD_SURF_COLORKEY_SET;
#define VBOXVHWACMD_SURF_OVERLAY_UPDATE_F_SRCMEMRECT 0x00000001
#define VBOXVHWACMD_SURF_OVERLAY_UPDATE_F_DSTMEMRECT 0x00000002
typedef struct VBOXVHWACMD_SURF_OVERLAY_UPDATE
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hDstSurf;
uint64_t offDstSurface;
VBOXVHWA_RECTL dstRect;
VBOXVHWA_SURFHANDLE hSrcSurf;
uint64_t offSrcSurface;
VBOXVHWA_RECTL srcRect;
uint32_t flags;
uint32_t xFlags;
VBOXVHWA_OVERLAYFX desc;
VBOXVHWA_RECTL xUpdatedSrcMemRect;
VBOXVHWA_RECTL xUpdatedDstMemRect;
} in;
} u;
}VBOXVHWACMD_SURF_OVERLAY_UPDATE;
typedef struct VBOXVHWACMD_SURF_OVERLAY_SETPOSITION
{
union
{
struct
{
VBOXVHWA_SURFHANDLE hDstSurf;
uint64_t offDstSurface;
VBOXVHWA_SURFHANDLE hSrcSurf;
uint64_t offSrcSurface;
uint32_t xPos;
uint32_t yPos;
uint32_t flags;
uint32_t reserved;
} in;
} u;
} VBOXVHWACMD_SURF_OVERLAY_SETPOSITION;
typedef struct VBOXVHWACMD_HH_CONSTRUCT
{
void *pVM;
/* VRAM info for the backend to be able to properly translate VRAM offsets */
void *pvVRAM;
uint32_t cbVRAM;
} VBOXVHWACMD_HH_CONSTRUCT;
typedef struct VBOXVHWACMD_HH_SAVESTATE_SAVEPERFORM
{
struct SSMHANDLE * pSSM;
} VBOXVHWACMD_HH_SAVESTATE_SAVEPERFORM;
typedef struct VBOXVHWACMD_HH_SAVESTATE_LOADPERFORM
{
struct SSMHANDLE * pSSM;
} VBOXVHWACMD_HH_SAVESTATE_LOADPERFORM;
typedef DECLCALLBACK(void) FNVBOXVHWA_HH_CALLBACK(void*);
typedef FNVBOXVHWA_HH_CALLBACK *PFNVBOXVHWA_HH_CALLBACK;
#define VBOXVHWA_HH_CALLBACK_SET(_pCmd, _pfn, _parg) \
do { \
(_pCmd)->GuestVBVAReserved1 = (uint64_t)(uintptr_t)(_pfn); \
(_pCmd)->GuestVBVAReserved2 = (uint64_t)(uintptr_t)(_parg); \
}while(0)
#define VBOXVHWA_HH_CALLBACK_GET(_pCmd) ((PFNVBOXVHWA_HH_CALLBACK)(_pCmd)->GuestVBVAReserved1)
#define VBOXVHWA_HH_CALLBACK_GET_ARG(_pCmd) ((void*)(_pCmd)->GuestVBVAReserved2)
#pragma pack()
#endif /* #ifdef VBOX_WITH_VIDEOHWACCEL */
/* All structures are without alignment. */
#pragma pack(1)
/* Host-written flags located at the start of VBVABUFFER; byte-packed per the
 * surrounding #pragma pack(1). */
typedef struct VBVAHOSTFLAGS
{
/* Event bits posted by the host — TODO(review): confirm the bit definitions
 * used here (not visible in this header region). */
uint32_t u32HostEvents;
/* Orders (operations) the host supports — TODO(review): confirm semantics. */
uint32_t u32SupportedOrders;
} VBVAHOSTFLAGS;
/* Ring buffer in VRAM through which the guest passes drawing data to the
 * host. Records are described in aRecords; the raw bytes live in au8Data. */
typedef struct VBVABUFFER
{
/* Flags the host updates; guest reads them to learn host state. */
VBVAHOSTFLAGS hostFlags;
/* The offset where the data start in the buffer. */
uint32_t off32Data;
/* The offset where next data must be placed in the buffer. */
uint32_t off32Free;
/* The queue of record descriptions. */
VBVARECORD aRecords[VBVA_MAX_RECORDS];
/* Index of the oldest record still to be processed. */
uint32_t indexRecordFirst;
/* Index where the next record will be written. */
uint32_t indexRecordFree;
/* Space to leave free in the buffer when large partial records are transferred. */
uint32_t cbPartialWriteThreshold;
/* Size in bytes of the au8Data area. */
uint32_t cbData;
uint8_t au8Data[1]; /* variable size for the rest of the VBVABUFFER area in VRAM. */
} VBVABUFFER;
#define VBVA_MAX_RECORD_SIZE (128*_1M)
/* guest->host commands */
#define VBVA_QUERY_CONF32 1
#define VBVA_SET_CONF32 2
#define VBVA_INFO_VIEW 3
#define VBVA_INFO_HEAP 4
#define VBVA_FLUSH 5
#define VBVA_INFO_SCREEN 6
#define VBVA_ENABLE 7
#define VBVA_MOUSE_POINTER_SHAPE 8
#ifdef VBOX_WITH_VIDEOHWACCEL
# define VBVA_VHWA_CMD 9
#endif /* # ifdef VBOX_WITH_VIDEOHWACCEL */
#ifdef VBOX_WITH_VDMA
# define VBVA_VDMA_CTL 10 /* setup G<->H DMA channel info */
# define VBVA_VDMA_CMD 11 /* G->H DMA command */
#endif
#define VBVA_INFO_CAPS 12 /* informs host about HGSMI caps. see VBVACAPS below */
#define VBVA_SCANLINE_CFG 13 /* configures scanline, see VBVASCANLINECFG below */
#define VBVA_SCANLINE_INFO 14 /* requests scanline info, see VBVASCANLINEINFO below */
#define VBVA_CMDVBVA_SUBMIT 16 /* inform host about VBVA Command submission */
#define VBVA_CMDVBVA_FLUSH 17 /* inform host about VBVA Command submission */
#define VBVA_CMDVBVA_CTL 18 /* G->H DMA command */
#define VBVA_QUERY_MODE_HINTS 19 /* Query most recent mode hints sent. */
/** Report the guest virtual desktop position and size for mapping host and
* guest pointer positions. */
#define VBVA_REPORT_INPUT_MAPPING 20
/** Report the guest cursor position and query the host position. */
#define VBVA_CURSOR_POSITION 21
/* host->guest commands */
#define VBVAHG_EVENT 1
#define VBVAHG_DISPLAY_CUSTOM 2
#ifdef VBOX_WITH_VDMA
#define VBVAHG_SHGSMI_COMPLETION 3
#endif
#ifdef VBOX_WITH_VIDEOHWACCEL
#define VBVAHG_DCUSTOM_VHWA_CMDCOMPLETE 1
#pragma pack(1)
typedef struct VBVAHOSTCMDVHWACMDCOMPLETE
{
uint32_t offCmd;
}VBVAHOSTCMDVHWACMDCOMPLETE;
#pragma pack()
#endif /* # ifdef VBOX_WITH_VIDEOHWACCEL */
#pragma pack(1)
typedef enum
{
VBVAHOSTCMD_OP_EVENT = 1,
VBVAHOSTCMD_OP_CUSTOM
}VBVAHOSTCMD_OP_TYPE;
typedef struct VBVAHOSTCMDEVENT
{
uint64_t pEvent;
}VBVAHOSTCMDEVENT;
/* Header of a host->guest command (VBVAHG_* family). The command payload
 * starts at 'body' (see VBVAHOSTCMD_BODY / VBVAHOSTCMD_SIZE below). */
typedef struct VBVAHOSTCMD
{
/* destination ID if >=0 specifies display index, otherwise the command is directed to the miniport */
int32_t iDstID;
/* Custom operation code, meaningful for VBVAHG_DISPLAY_CUSTOM commands. */
int32_t customOpCode;
union
{
struct VBVAHOSTCMD *pNext;
uint32_t offNext;
uint64_t Data; /* the body is 64-bit aligned */
} u;
/* Variable-length payload; declared as [1] (pre-C99 idiom). NOTE(review):
 * VBVAHOSTCMD_SIZE below uses sizeof(VBVAHOSTCMD) rather than the offset of
 * 'body' (unlike VBOXVHWACMD_SIZE), so sizes include this placeholder byte
 * plus padding — presumably intentional over-allocation; confirm before
 * relying on exact sizes. */
char body[1];
}VBVAHOSTCMD;
#define VBVAHOSTCMD_SIZE(_size) (sizeof(VBVAHOSTCMD) + (_size))
#define VBVAHOSTCMD_BODY(_pCmd, _tBody) ((_tBody*)(_pCmd)->body)
#define VBVAHOSTCMD_HDR(_pBody) ((VBVAHOSTCMD*)(((uint8_t*)_pBody) - RT_OFFSETOF(VBVAHOSTCMD, body)))
#define VBVAHOSTCMD_HDRSIZE (RT_OFFSETOF(VBVAHOSTCMD, body))
#pragma pack()
/* VBVACONF32::u32Index */
#define VBOX_VBVA_CONF32_MONITOR_COUNT 0
#define VBOX_VBVA_CONF32_HOST_HEAP_SIZE 1
/** Returns VINF_SUCCESS if the host can report mode hints via VBVA.
* Set value to VERR_NOT_SUPPORTED before calling. */
#define VBOX_VBVA_CONF32_MODE_HINT_REPORTING 2
/** Returns VINF_SUCCESS if the host can receive guest cursor information via
* VBVA. Set value to VERR_NOT_SUPPORTED before calling. */
#define VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING 3
/** Returns the currently available host cursor capabilities. Available if
* VBVACONF32::VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING returns success.
* @see VMMDevReqMouseStatus::mouseFeatures. */
#define VBOX_VBVA_CONF32_CURSOR_CAPABILITIES 4
/** Returns the supported flags in VBVAINFOSCREEN::u8Flags. */
#define VBOX_VBVA_CONF32_SCREEN_FLAGS 5
/** Returns the max size of VBVA record. */
#define VBOX_VBVA_CONF32_MAX_RECORD_SIZE 6
typedef struct VBVACONF32
{
uint32_t u32Index;
uint32_t u32Value;
} VBVACONF32;
typedef struct VBVAINFOVIEW
{
/* Index of the screen, assigned by the guest. */
uint32_t u32ViewIndex;
/* The screen offset in VRAM, the framebuffer starts here. */
uint32_t u32ViewOffset;
/* The size of the VRAM memory that can be used for the view. */
uint32_t u32ViewSize;
/* The recommended maximum size of the VRAM memory for the screen. */
uint32_t u32MaxScreenSize;
} VBVAINFOVIEW;
typedef struct VBVAINFOHEAP
{
/* Absolute offset in VRAM of the start of the heap. */
uint32_t u32HeapOffset;
/* The size of the heap. */
uint32_t u32HeapSize;
} VBVAINFOHEAP;
typedef struct VBVAFLUSH
{
uint32_t u32Reserved;
} VBVAFLUSH;
typedef struct VBVACMDVBVASUBMIT
{
uint32_t u32Reserved;
} VBVACMDVBVASUBMIT;
/* flush is requested because due to guest command buffer overflow */
#define VBVACMDVBVAFLUSH_F_GUEST_BUFFER_OVERFLOW 1
typedef struct VBVACMDVBVAFLUSH
{
uint32_t u32Flags;
} VBVACMDVBVAFLUSH;
/* VBVAINFOSCREEN::u8Flags */
#define VBVA_SCREEN_F_NONE 0x0000
#define VBVA_SCREEN_F_ACTIVE 0x0001
/** The virtual monitor has been disabled by the guest and should be removed
* by the host and ignored for purposes of pointer position calculation. */
#define VBVA_SCREEN_F_DISABLED 0x0002
/** The virtual monitor has been blanked by the guest and should be blacked
* out by the host. */
#define VBVA_SCREEN_F_BLANK 0x0004
typedef struct VBVAINFOSCREEN
{
/* Which view contains the screen. */
uint32_t u32ViewIndex;
/* Physical X origin relative to the primary screen. */
int32_t i32OriginX;
/* Physical Y origin relative to the primary screen. */
int32_t i32OriginY;
/* Offset of visible framebuffer relative to the framebuffer start. */
uint32_t u32StartOffset;
/* The scan line size in bytes. */
uint32_t u32LineSize;
/* Width of the screen. */
uint32_t u32Width;
/* Height of the screen. */
uint32_t u32Height;
/* Color depth. */
uint16_t u16BitsPerPixel;
/* VBVA_SCREEN_F_* */
uint16_t u16Flags;
} VBVAINFOSCREEN;
/* VBVAENABLE::u32Flags */
#define VBVA_F_NONE 0x00000000
#define VBVA_F_ENABLE 0x00000001
#define VBVA_F_DISABLE 0x00000002
/* extended VBVA to be used with WDDM */
#define VBVA_F_EXTENDED 0x00000004
/* vbva offset is absolute VRAM offset */
#define VBVA_F_ABSOFFSET 0x00000008
typedef struct VBVAENABLE
{
uint32_t u32Flags;
uint32_t u32Offset;
int32_t i32Result;
} VBVAENABLE;
typedef struct VBVAENABLE_EX
{
VBVAENABLE Base;
uint32_t u32ScreenId;
} VBVAENABLE_EX;
typedef struct VBVAMOUSEPOINTERSHAPE
{
/* The host result. */
int32_t i32Result;
/* VBOX_MOUSE_POINTER_* bit flags. */
uint32_t fu32Flags;
/* X coordinate of the hot spot. */
uint32_t u32HotX;
/* Y coordinate of the hot spot. */
uint32_t u32HotY;
/* Width of the pointer in pixels. */
uint32_t u32Width;
/* Height of the pointer in scanlines. */
uint32_t u32Height;
/* Pointer data.
*
****
* The data consists of 1 bpp AND mask followed by 32 bpp XOR (color) mask.
*
* For pointers without alpha channel the XOR mask pixels are 32 bit values: (lsb)BGR0(msb).
* For pointers with alpha channel the XOR mask consists of (lsb)BGRA(msb) 32 bit values.
*
* Guest driver must create the AND mask for pointers with alpha channel, so if host does not
* support alpha, the pointer could be displayed as a normal color pointer. The AND mask can
* be constructed from alpha values. For example alpha value >= 0xf0 means bit 0 in the AND mask.
*
* The AND mask is 1 bpp bitmap with byte aligned scanlines. Size of AND mask,
* therefore, is cbAnd = (width + 7) / 8 * height. The padding bits at the
* end of any scanline are undefined.
*
* The XOR mask follows the AND mask on the next 4 bytes aligned offset:
* uint8_t *pXor = pAnd + (cbAnd + 3) & ~3
* Bytes in the gap between the AND and the XOR mask are undefined.
* XOR mask scanlines have no gap between them and size of XOR mask is:
* cXor = width * 4 * height.
****
*
* Preallocate 4 bytes for accessing actual data as p->au8Data.
*/
uint8_t au8Data[4];
} VBVAMOUSEPOINTERSHAPE;
/* the guest driver can handle asynch guest cmd completion by reading the command offset from io port */
#define VBVACAPS_COMPLETEGCMD_BY_IOREAD 0x00000001
/* the guest driver can handle video adapter IRQs */
#define VBVACAPS_IRQ 0x00000002
/** The guest can read video mode hints sent via VBVA. */
#define VBVACAPS_VIDEO_MODE_HINTS 0x00000004
/** The guest can switch to a software cursor on demand. */
#define VBVACAPS_DISABLE_CURSOR_INTEGRATION 0x00000008
/** The guest does not depend on host handling the VBE registers. */
#define VBVACAPS_USE_VBVA_ONLY 0x00000010
/** Guest capability report; fCaps is an OR-ed set of VBVACAPS_XXX flags,
 * rc receives the host result. */
typedef struct VBVACAPS
{
int32_t rc;
uint32_t fCaps;
} VBVACAPS;
/* makes graphics device generate IRQ on VSYNC */
#define VBVASCANLINECFG_ENABLE_VSYNC_IRQ 0x00000001
/* guest driver may request the current scanline */
#define VBVASCANLINECFG_ENABLE_SCANLINE_INFO 0x00000002
/* request the current refresh period, returned in u32RefreshPeriodMs */
#define VBVASCANLINECFG_QUERY_REFRESH_PERIOD 0x00000004
/* set new refresh period specified in u32RefreshPeriodMs.
 * if used with VBVASCANLINECFG_QUERY_REFRESH_PERIOD,
 * u32RefreshPeriodMs is set to the previous refresh period on return */
#define VBVASCANLINECFG_SET_REFRESH_PERIOD 0x00000008
/** Scanline/VSYNC configuration request; fFlags is an OR-ed set of
 * VBVASCANLINECFG_XXX flags, rc receives the host result. */
typedef struct VBVASCANLINECFG
{
int32_t rc;
uint32_t fFlags;
uint32_t u32RefreshPeriodMs;
uint32_t u32Reserved;
} VBVASCANLINECFG;
/** Per-screen scanline query; u32InVBlank and u32ScanLine are filled in by
 * the host, rc receives the host result. */
typedef struct VBVASCANLINEINFO
{
int32_t rc;
uint32_t u32ScreenId;
uint32_t u32InVBlank;
uint32_t u32ScanLine;
} VBVASCANLINEINFO;
/** Query the most recent mode hints received from the host. */
typedef struct VBVAQUERYMODEHINTS
{
/** The maximum number of screens to return hints for. */
uint16_t cHintsQueried;
/** The size of the mode hint structures directly following this one. */
uint16_t cbHintStructureGuest;
/** The return code for the operation. Initialise to VERR_NOT_SUPPORTED. */
int32_t rc;
} VBVAQUERYMODEHINTS;
/** Structure in which a mode hint is returned. The guest allocates an array
 * of these immediately after the VBVAQUERYMODEHINTS structure. To accommodate
 * future extensions, the VBVAQUERYMODEHINTS structure specifies the size of
 * the VBVAMODEHINT structures allocated by the guest, and the host only fills
 * out structure elements which fit into that size. The host should fill any
 * unused members (e.g. dx, dy) or structure space on the end with ~0. The
 * whole structure can legally be set to ~0 to skip a screen. */
typedef struct VBVAMODEHINT
{
uint32_t magic;
uint32_t cx;
uint32_t cy;
uint32_t cBPP; /* Which has never been used... */
uint32_t cDisplay;
uint32_t dx; /**< X offset into the virtual frame-buffer. */
uint32_t dy; /**< Y offset into the virtual frame-buffer. */
uint32_t fEnabled; /* Not fFlags. Add new members for new flags. */
} VBVAMODEHINT;
/** Expected value of VBVAMODEHINT::magic. */
#define VBVAMODEHINT_MAGIC UINT32_C(0x0801add9)
/** Report the rectangle relative to which absolute pointer events should be
 * expressed. This information remains valid until the next VBVA resize event
 * for any screen, at which time it is reset to the bounding rectangle of all
 * virtual screens and must be re-set.
 * @see VBVA_REPORT_INPUT_MAPPING. */
typedef struct VBVAREPORTINPUTMAPPING
{
int32_t x; /**< Upper left X co-ordinate relative to the first screen. */
int32_t y; /**< Upper left Y co-ordinate relative to the first screen. */
uint32_t cx; /**< Rectangle width. */
uint32_t cy; /**< Rectangle height. */
} VBVAREPORTINPUTMAPPING;
/** Report the guest cursor position and query the host one. The host may wish
 * to use the guest information to re-position its own cursor (though this is
 * currently unlikely).
 * @see VBVA_CURSOR_POSITION */
typedef struct VBVACURSORPOSITION
{
uint32_t fReportPosition; /**< Are we reporting a position? */
uint32_t x; /**< Guest cursor X position */
uint32_t y; /**< Guest cursor Y position */
} VBVACURSORPOSITION;
#pragma pack()
/** Offset into VRAM; ~0 marks an invalid/unset offset. */
typedef uint64_t VBOXVIDEOOFFSET;
#define VBOXVIDEOOFFSET_VOID ((VBOXVIDEOOFFSET)~0)
#pragma pack(1)
/*
 * VBOXSHGSMI is made on top of HGSMI and allows receiving notifications
 * about G->H command completion
 */
/* SHGSMI command header */
typedef struct VBOXSHGSMIHEADER
{
uint64_t pvNext; /*<- completion processing queue */
uint32_t fFlags; /*<- see VBOXSHGSMI_FLAG_XXX Flags */
uint32_t cRefs; /*<- command reference count */
uint64_t u64Info1; /*<- contents depend on the fFlags value */
uint64_t u64Info2; /*<- contents depend on the fFlags value */
} VBOXSHGSMIHEADER, *PVBOXSHGSMIHEADER;
/** Type of a VDMA command (see VBOXVDMACMD::enmType). */
typedef enum
{
VBOXVDMACMD_TYPE_UNDEFINED = 0,
VBOXVDMACMD_TYPE_DMA_PRESENT_BLT = 1,
VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER,
VBOXVDMACMD_TYPE_DMA_BPB_FILL,
VBOXVDMACMD_TYPE_DMA_PRESENT_SHADOW2PRIMARY,
VBOXVDMACMD_TYPE_DMA_PRESENT_CLRFILL,
VBOXVDMACMD_TYPE_DMA_PRESENT_FLIP,
VBOXVDMACMD_TYPE_DMA_NOP,
VBOXVDMACMD_TYPE_CHROMIUM_CMD, /* chromium cmd */
VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER_VRAMSYS,
VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ /* make the device notify child (monitor) state change IRQ */
} VBOXVDMACMD_TYPE;
#pragma pack()
/* VBOXSHGSMIHEADER::fFlags values follow. */
/* the command processing was asynch, set by the host to indicate asynch command completion
 * must not be cleared once set, the command completion is performed by issuing a host->guest completion command
 * while keeping this flag unchanged */
#define VBOXSHGSMI_FLAG_HG_ASYNCH 0x00010000
#if 0
/* if set - asynch completion is performed by issuing the event,
 * if cleared - asynch completion is performed by calling a callback */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_EVENT 0x00000001
#endif
/* issue interrupt on asynch completion, used for critical G->H commands,
 * i.e. for completion of which guest is waiting. */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ 0x00000002
/* guest does not do any op on completion of this command,
 * the host may copy the command and indicate that it does not need the command anymore
 * by not setting VBOXSHGSMI_FLAG_HG_ASYNCH */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_NOCOMPLETION 0x00000004
/* guest requires the command to be processed asynchronously,
 * not setting VBOXSHGSMI_FLAG_HG_ASYNCH by the host in this case is treated as command failure */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_FORCE 0x00000008
/* force IRQ on cmd completion */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_IRQ_FORCE 0x00000010
/* an IRQ-level callback is associated with the command */
#define VBOXSHGSMI_FLAG_GH_ASYNCH_CALLBACK_IRQ 0x00000020
/* guest expects this command to be completed synchronously */
#define VBOXSHGSMI_FLAG_GH_SYNCH 0x00000040
/** Return a pointer to the command payload, which starts immediately after
 * the SHGSMI header. */
DECLINLINE(uint8_t *) VBoxSHGSMIBufferData (const VBOXSHGSMIHEADER* pHeader)
{
return (uint8_t *)(pHeader + 1);
}
/** Size of the SHGSMI header preceding every command payload. */
#define VBoxSHGSMIBufferHeaderSize() (sizeof (VBOXSHGSMIHEADER))
/** Recover the SHGSMI header from a pointer to the command payload
 * (inverse of VBoxSHGSMIBufferData). */
DECLINLINE(PVBOXSHGSMIHEADER) VBoxSHGSMIBufferHeader (const void *pvData)
{
return (PVBOXSHGSMIHEADER)pvData - 1;
}
#ifdef VBOX_WITH_VDMA
# pragma pack(1)
/* VDMA - Video DMA */
/* VDMA Control API */
/* VBOXVDMA_CTL::u32Flags */
typedef enum
{
VBOXVDMA_CTL_TYPE_NONE = 0,
VBOXVDMA_CTL_TYPE_ENABLE,
VBOXVDMA_CTL_TYPE_DISABLE,
VBOXVDMA_CTL_TYPE_FLUSH,
VBOXVDMA_CTL_TYPE_WATCHDOG
} VBOXVDMA_CTL_TYPE;
/** VDMA control request; i32Result receives the host result. */
typedef struct VBOXVDMA_CTL
{
VBOXVDMA_CTL_TYPE enmCtl;
uint32_t u32Offset;
int32_t i32Result;
} VBOXVDMA_CTL, *PVBOXVDMA_CTL;
/** Compact rectangle (16-bit fields, pack(1) layout). */
typedef struct VBOXVDMA_RECTL
{
int16_t left;
int16_t top;
uint16_t width;
uint16_t height;
} VBOXVDMA_RECTL, *PVBOXVDMA_RECTL;
/* Pixel format codes; numeric values mirror D3DFORMAT-style codes. */
typedef enum
{
VBOXVDMA_PIXEL_FORMAT_UNKNOWN = 0,
VBOXVDMA_PIXEL_FORMAT_R8G8B8 = 20,
VBOXVDMA_PIXEL_FORMAT_A8R8G8B8 = 21,
VBOXVDMA_PIXEL_FORMAT_X8R8G8B8 = 22,
VBOXVDMA_PIXEL_FORMAT_R5G6B5 = 23,
VBOXVDMA_PIXEL_FORMAT_X1R5G5B5 = 24,
VBOXVDMA_PIXEL_FORMAT_A1R5G5B5 = 25,
VBOXVDMA_PIXEL_FORMAT_A4R4G4B4 = 26,
VBOXVDMA_PIXEL_FORMAT_R3G3B2 = 27,
VBOXVDMA_PIXEL_FORMAT_A8 = 28,
VBOXVDMA_PIXEL_FORMAT_A8R3G3B2 = 29,
VBOXVDMA_PIXEL_FORMAT_X4R4G4B4 = 30,
VBOXVDMA_PIXEL_FORMAT_A2B10G10R10 = 31,
VBOXVDMA_PIXEL_FORMAT_A8B8G8R8 = 32,
VBOXVDMA_PIXEL_FORMAT_X8B8G8R8 = 33,
VBOXVDMA_PIXEL_FORMAT_G16R16 = 34,
VBOXVDMA_PIXEL_FORMAT_A2R10G10B10 = 35,
VBOXVDMA_PIXEL_FORMAT_A16B16G16R16 = 36,
VBOXVDMA_PIXEL_FORMAT_A8P8 = 40,
VBOXVDMA_PIXEL_FORMAT_P8 = 41,
VBOXVDMA_PIXEL_FORMAT_L8 = 50,
VBOXVDMA_PIXEL_FORMAT_A8L8 = 51,
VBOXVDMA_PIXEL_FORMAT_A4L4 = 52,
VBOXVDMA_PIXEL_FORMAT_V8U8 = 60,
VBOXVDMA_PIXEL_FORMAT_L6V5U5 = 61,
VBOXVDMA_PIXEL_FORMAT_X8L8V8U8 = 62,
VBOXVDMA_PIXEL_FORMAT_Q8W8V8U8 = 63,
VBOXVDMA_PIXEL_FORMAT_V16U16 = 64,
VBOXVDMA_PIXEL_FORMAT_W11V11U10 = 65,
VBOXVDMA_PIXEL_FORMAT_A2W10V10U10 = 67
} VBOXVDMA_PIXEL_FORMAT;
/** Description of a surface taking part in a VDMA operation. */
typedef struct VBOXVDMA_SURF_DESC
{
uint32_t width;
uint32_t height;
VBOXVDMA_PIXEL_FORMAT format;
uint32_t bpp;
uint32_t pitch;
uint32_t fFlags;
} VBOXVDMA_SURF_DESC, *PVBOXVDMA_SURF_DESC;
/*typedef uint64_t VBOXVDMAPHADDRESS;*/
typedef uint64_t VBOXVDMASURFHANDLE;
/* region specified as a rectangle, otherwise it is a size of memory pointed to by phys address */
#define VBOXVDMAOPERAND_FLAGS_RECTL 0x1
/* Surface handle is valid */
#define VBOXVDMAOPERAND_FLAGS_PRIMARY 0x2
/* address is offset in VRAM */
#define VBOXVDMAOPERAND_FLAGS_VRAMOFFSET 0x4
/* VBOXVDMACBUF_DR::phBuf specifies offset in VRAM */
#define VBOXVDMACBUF_FLAG_BUF_VRAM_OFFSET 0x00000001
/* command buffer follows the VBOXVDMACBUF_DR in VRAM, VBOXVDMACBUF_DR::phBuf is ignored */
#define VBOXVDMACBUF_FLAG_BUF_FOLLOWS_DR 0x00000002
/*
 * We can not submit the DMA command via VRAM since we do not have control over
 * DMA command buffer [de]allocation, i.e. we only control the buffer contents.
 * In other words the system may call one of our callbacks to fill a command buffer
 * with the necessary commands and then discard the buffer w/o any notification.
 *
 * We have only DMA command buffer physical address at submission time.
 *
 * so the only way is to */
/** DMA command buffer descriptor ("DR"); locates the command buffer either
 * by physical address or by VRAM offset depending on fFlags. */
typedef struct VBOXVDMACBUF_DR
{
uint16_t fFlags;
uint16_t cbBuf;
/* RT_SUCCESS() - on success
 * VERR_INTERRUPTED - on preemption
 * VERR_xxx - on error */
int32_t rc;
union
{
uint64_t phBuf;
VBOXVIDEOOFFSET offVramBuf;
} Location;
uint64_t aGuestData[7];
} VBOXVDMACBUF_DR, *PVBOXVDMACBUF_DR;
/* Accessors for the data immediately following a VBOXVDMACBUF_DR. */
#define VBOXVDMACBUF_DR_TAIL(_pCmd, _t) ( (_t*)(((uint8_t*)(_pCmd)) + sizeof (VBOXVDMACBUF_DR)) )
#define VBOXVDMACBUF_DR_FROM_TAIL(_pCmd) ( (VBOXVDMACBUF_DR*)(((uint8_t*)(_pCmd)) - sizeof (VBOXVDMACBUF_DR)) )
/** Generic VDMA command header; the type-specific body follows directly. */
typedef struct VBOXVDMACMD
{
VBOXVDMACMD_TYPE enmType;
uint32_t u32CmdSpecific;
} VBOXVDMACMD, *PVBOXVDMACMD;
/* Helpers converting between a command header and its body. */
#define VBOXVDMACMD_HEADER_SIZE() sizeof (VBOXVDMACMD)
#define VBOXVDMACMD_SIZE_FROMBODYSIZE(_s) (VBOXVDMACMD_HEADER_SIZE() + (_s))
#define VBOXVDMACMD_SIZE(_t) (VBOXVDMACMD_SIZE_FROMBODYSIZE(sizeof (_t)))
#define VBOXVDMACMD_BODY(_pCmd, _t) ( (_t*)(((uint8_t*)(_pCmd)) + VBOXVDMACMD_HEADER_SIZE()) )
#define VBOXVDMACMD_BODY_SIZE(_s) ( (_s) - VBOXVDMACMD_HEADER_SIZE() )
#define VBOXVDMACMD_FROM_BODY(_pCmd) ( (VBOXVDMACMD*)(((uint8_t*)(_pCmd)) - VBOXVDMACMD_HEADER_SIZE()) )
#define VBOXVDMACMD_BODY_FIELD_OFFSET(_ot, _t, _f) ( (_ot)(uintptr_t)( VBOXVDMACMD_BODY(0, uint8_t) + RT_OFFSETOF(_t, _f) ) )
/** Body of VBOXVDMACMD_TYPE_DMA_PRESENT_BLT; aDstSubRects has cDstSubRects
 * elements (declared [1], allocated with trailing storage). */
typedef struct VBOXVDMACMD_DMA_PRESENT_BLT
{
VBOXVIDEOOFFSET offSrc;
VBOXVIDEOOFFSET offDst;
VBOXVDMA_SURF_DESC srcDesc;
VBOXVDMA_SURF_DESC dstDesc;
VBOXVDMA_RECTL srcRectl;
VBOXVDMA_RECTL dstRectl;
uint32_t u32Reserved;
uint32_t cDstSubRects;
VBOXVDMA_RECTL aDstSubRects[1];
} VBOXVDMACMD_DMA_PRESENT_BLT, *PVBOXVDMACMD_DMA_PRESENT_BLT;
/** Body of VBOXVDMACMD_TYPE_DMA_PRESENT_SHADOW2PRIMARY. */
typedef struct VBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY
{
VBOXVDMA_RECTL Rect;
} VBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY, *PVBOXVDMACMD_DMA_PRESENT_SHADOW2PRIMARY;
/* fFlags bits selecting whether Src/Dst are VRAM offsets rather than physical addresses. */
#define VBOXVDMACMD_DMA_BPB_TRANSFER_F_SRC_VRAMOFFSET 0x00000001
#define VBOXVDMACMD_DMA_BPB_TRANSFER_F_DST_VRAMOFFSET 0x00000002
/** Body of VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER (buffer-to-buffer copy). */
typedef struct VBOXVDMACMD_DMA_BPB_TRANSFER
{
uint32_t cbTransferSize;
uint32_t fFlags;
union
{
uint64_t phBuf;
VBOXVIDEOOFFSET offVramBuf;
} Src;
union
{
uint64_t phBuf;
VBOXVIDEOOFFSET offVramBuf;
} Dst;
} VBOXVDMACMD_DMA_BPB_TRANSFER, *PVBOXVDMACMD_DMA_BPB_TRANSFER;
#define VBOXVDMACMD_SYSMEMEL_F_PAGELIST 0x00000001
/** System-memory element: when VBOXVDMACMD_SYSMEMEL_F_PAGELIST is set in
 * fFlags, phBuf holds cPages page addresses (declared [1], allocated with
 * trailing storage); otherwise it holds a single buffer address. */
typedef struct VBOXVDMACMD_SYSMEMEL
{
uint32_t cPages;
uint32_t fFlags;
uint64_t phBuf[1];
} VBOXVDMACMD_SYSMEMEL, *PVBOXVDMACMD_SYSMEMEL;
/* Advance to the element following _pEl, accounting for its variable size.
 * Fix: the original replacement list was missing the final closing
 * parenthesis, so any expansion of this macro failed to compile. */
#define VBOXVDMACMD_SYSMEMEL_NEXT(_pEl) (((_pEl)->fFlags & VBOXVDMACMD_SYSMEMEL_F_PAGELIST) ? \
        ((PVBOXVDMACMD_SYSMEMEL)(((uint8_t*)(_pEl))+RT_OFFSETOF(VBOXVDMACMD_SYSMEMEL, phBuf[(_pEl)->cPages]))) \
        : \
        ((_pEl)+1))
/* fFlags bit: transfer direction is system memory -> VRAM. */
#define VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS_SYS2VRAM 0x00000001
/** Body of VBOXVDMACMD_TYPE_DMA_BPB_TRANSFER_VRAMSYS: copy between VRAM and
 * guest system memory described by a chain of VBOXVDMACMD_SYSMEMEL. */
typedef struct VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS
{
uint32_t cTransferPages;
uint32_t fFlags;
VBOXVIDEOOFFSET offVramBuf;
VBOXVDMACMD_SYSMEMEL FirstEl;
} VBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS, *PVBOXVDMACMD_DMA_BPB_TRANSFER_VRAMSYS;
/** Body of VBOXVDMACMD_TYPE_DMA_BPB_FILL. */
typedef struct VBOXVDMACMD_DMA_BPB_FILL
{
VBOXVIDEOOFFSET offSurf;
uint32_t cbFillSize;
uint32_t u32FillPattern;
} VBOXVDMACMD_DMA_BPB_FILL, *PVBOXVDMACMD_DMA_BPB_FILL;
/* VBOXVDMA_CHILD_STATUS::fFlags values. */
#define VBOXVDMA_CHILD_STATUS_F_CONNECTED 0x01
#define VBOXVDMA_CHILD_STATUS_F_DISCONNECTED 0x02
#define VBOXVDMA_CHILD_STATUS_F_ROTATED 0x04
/** Connection/rotation state of one child (monitor). */
typedef struct VBOXVDMA_CHILD_STATUS
{
uint32_t iChild;
uint8_t fFlags;
uint8_t u8RotationAngle;
uint16_t u16Reserved;
} VBOXVDMA_CHILD_STATUS, *PVBOXVDMA_CHILD_STATUS;
/* if set, the aInfos are applied to all targets and the iChild field is ignored */
#define VBOXVDMACMD_CHILD_STATUS_IRQ_F_APPLY_TO_ALL 0x00000001
/** Body of VBOXVDMACMD_TYPE_CHILD_STATUS_IRQ; aInfos has cInfos elements. */
typedef struct VBOXVDMACMD_CHILD_STATUS_IRQ
{
uint32_t cInfos;
uint32_t fFlags;
VBOXVDMA_CHILD_STATUS aInfos[1];
} VBOXVDMACMD_CHILD_STATUS_IRQ, *PVBOXVDMACMD_CHILD_STATUS_IRQ;
# pragma pack()
#endif /* #ifdef VBOX_WITH_VDMA */
#pragma pack(1)
/** One buffer referenced by a chromium (3D) command; offBuffer is a VRAM offset. */
typedef struct VBOXVDMACMD_CHROMIUM_BUFFER
{
VBOXVIDEOOFFSET offBuffer;
uint32_t cbBuffer;
uint32_t u32GuestData;
uint64_t u64GuestData;
} VBOXVDMACMD_CHROMIUM_BUFFER, *PVBOXVDMACMD_CHROMIUM_BUFFER;
/** Chromium command: cBuffers buffer descriptors (declared [1], allocated
 * with trailing storage). */
typedef struct VBOXVDMACMD_CHROMIUM_CMD
{
uint32_t cBuffers;
uint32_t u32Reserved;
VBOXVDMACMD_CHROMIUM_BUFFER aBuffers[1];
} VBOXVDMACMD_CHROMIUM_CMD, *PVBOXVDMACMD_CHROMIUM_CMD;
/** Type of a chromium control command (VBOXVDMACMD_CHROMIUM_CTL::enmType). */
typedef enum
{
VBOXVDMACMD_CHROMIUM_CTL_TYPE_UNKNOWN = 0,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_BEGIN,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_SAVESTATE_END,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRHGSMI_SETUP_MAINCB,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_CRCONNECT,
VBOXVDMACMD_CHROMIUM_CTL_TYPE_SIZEHACK = 0x7fffffff /* force 32-bit enum */
} VBOXVDMACMD_CHROMIUM_CTL_TYPE;
/** Common header of all chromium control commands. */
typedef struct VBOXVDMACMD_CHROMIUM_CTL
{
VBOXVDMACMD_CHROMIUM_CTL_TYPE enmType;
uint32_t cbCmd;
} VBOXVDMACMD_CHROMIUM_CTL, *PVBOXVDMACMD_CHROMIUM_CTL;
/** Opaque handle passed back on chromium command completion. */
typedef struct PDMIDISPLAYVBVACALLBACKS *HCRHGSMICMDCOMPLETION;
typedef DECLCALLBACK(int) FNCRHGSMICMDCOMPLETION(HCRHGSMICMDCOMPLETION hCompletion, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
typedef FNCRHGSMICMDCOMPLETION *PFNCRHGSMICMDCOMPLETION;
/* tells whether 3D backend has some 3D overlay data displayed */
typedef DECLCALLBACK(bool) FNCROGLHASDATA(void);
typedef FNCROGLHASDATA *PFNCROGLHASDATA;
/* same as PFNCROGLHASDATA, but for specific screen */
typedef DECLCALLBACK(bool) FNCROGLHASDATAFORSCREEN(uint32_t i32ScreenID);
typedef FNCROGLHASDATAFORSCREEN *PFNCROGLHASDATAFORSCREEN;
/* callbacks chrogl gives to main */
typedef struct CR_MAIN_INTERFACE
{
PFNCROGLHASDATA pfnHasData;
PFNCROGLHASDATAFORSCREEN pfnHasDataForScreen;
} CR_MAIN_INTERFACE;
/** CRHGSMI_SETUP_MAINCB control command payload: exchanges the completion
 * callback (in) for the CrOpenGL main interface (out). */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB
{
VBOXVDMACMD_CHROMIUM_CTL Hdr;
/*in*/
HCRHGSMICMDCOMPLETION hCompletion;
PFNCRHGSMICMDCOMPLETION pfnCompletion;
/*out*/
CR_MAIN_INTERFACE MainInterface;
} VBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB, *PVBOXVDMACMD_CHROMIUM_CTL_CRHGSMI_SETUP_MAINCB;
/* Opaque handles used by the Main <-> CrOpenGL connection below. */
typedef struct VBOXCRCON_SERVER *HVBOXCRCON_SERVER;
typedef struct PDMIDISPLAYVBVACALLBACKS* HVBOXCRCON_CLIENT;
typedef struct VBOXCRCON_3DRGN_CLIENT* HVBOXCRCON_3DRGN_CLIENT;
typedef struct VBOXCRCON_3DRGN_ASYNCCLIENT* HVBOXCRCON_3DRGN_ASYNCCLIENT;
/* server callbacks */
/* submit chromium cmd */
typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_CRCMD(HVBOXCRCON_SERVER hServer, PVBOXVDMACMD_CHROMIUM_CMD pCmd, uint32_t cbCmd);
typedef FNVBOXCRCON_SVR_CRCMD *PFNVBOXCRCON_SVR_CRCMD;
/* submit chromium control cmd */
typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_CRCTL(HVBOXCRCON_SERVER hServer, PVBOXVDMACMD_CHROMIUM_CTL pCtl, uint32_t cbCmd);
typedef FNVBOXCRCON_SVR_CRCTL *PFNVBOXCRCON_SVR_CRCTL;
/* request 3D data.
 * The protocol is the following:
 * 1. if there is no 3D data displayed on screen, returns VINF_EOF immediately w/o calling any PFNVBOXCRCON_3DRGN_XXX callbacks
 * 2. otherwise calls PFNVBOXCRCON_3DRGN_ONSUBMIT, submits the "regions get" request to the CrOpenGL server to process it asynchronously and returns VINF_SUCCESS
 * 2.a on "regions get" request processing calls PFNVBOXCRCON_3DRGN_BEGIN,
 * 2.b then PFNVBOXCRCON_3DRGN_REPORT zero or more times for each 3D region,
 * 2.c and then PFNVBOXCRCON_3DRGN_END
 * 3. returns VERR_XXX code on failure
 * */
typedef DECLCALLBACK(int) FNVBOXCRCON_SVR_3DRGN_GET(HVBOXCRCON_SERVER hServer, HVBOXCRCON_3DRGN_CLIENT hRgnClient, uint32_t idScreen);
typedef FNVBOXCRCON_SVR_3DRGN_GET *PFNVBOXCRCON_SVR_3DRGN_GET;
/* 3D Regions Client callbacks */
/* called from the PFNVBOXCRCON_SVR_3DRGN_GET callback in case server has 3D data and is going to process the request asynchronously,
 * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_ONSUBMIT(HVBOXCRCON_3DRGN_CLIENT hRgnClient, uint32_t idScreen, HVBOXCRCON_3DRGN_ASYNCCLIENT *phRgnAsyncClient);
typedef FNVBOXCRCON_3DRGN_ONSUBMIT *PFNVBOXCRCON_3DRGN_ONSUBMIT;
/* called from the "regions get" command processing thread, to indicate that the "regions get" is started.
 * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_BEGIN(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen);
typedef FNVBOXCRCON_3DRGN_BEGIN *PFNVBOXCRCON_3DRGN_BEGIN;
/* called from the "regions get" command processing thread, to report a 3D region.
 * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_REPORT(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen, void *pvData, uint32_t cbStride, const RTRECT *pRect);
typedef FNVBOXCRCON_3DRGN_REPORT *PFNVBOXCRCON_3DRGN_REPORT;
/* called from the "regions get" command processing thread, to indicate that the "regions get" is completed.
 * see comments for PFNVBOXCRCON_SVR_3DRGN_GET above */
typedef DECLCALLBACK(int) FNVBOXCRCON_3DRGN_END(HVBOXCRCON_3DRGN_ASYNCCLIENT hRgnAsyncClient, uint32_t idScreen);
typedef FNVBOXCRCON_3DRGN_END *PFNVBOXCRCON_3DRGN_END;
/* client callbacks */
/* complete chromium cmd */
typedef DECLCALLBACK(int) FNVBOXCRCON_CLT_CRCTL_COMPLETE(HVBOXCRCON_CLIENT hClient, PVBOXVDMACMD_CHROMIUM_CTL pCtl, int rc);
typedef FNVBOXCRCON_CLT_CRCTL_COMPLETE *PFNVBOXCRCON_CLT_CRCTL_COMPLETE;
/* complete chromium control cmd */
typedef DECLCALLBACK(int) FNVBOXCRCON_CLT_CRCMD_COMPLETE(HVBOXCRCON_CLIENT hClient, PVBOXVDMACMD_CHROMIUM_CMD pCmd, int rc);
typedef FNVBOXCRCON_CLT_CRCMD_COMPLETE *PFNVBOXCRCON_CLT_CRCMD_COMPLETE;
/** Callback table the CrOpenGL server exposes to Main. */
typedef struct VBOXCRCON_SERVER_CALLBACKS
{
HVBOXCRCON_SERVER hServer;
PFNVBOXCRCON_SVR_CRCMD pfnCrCmd;
PFNVBOXCRCON_SVR_CRCTL pfnCrCtl;
PFNVBOXCRCON_SVR_3DRGN_GET pfn3DRgnGet;
} VBOXCRCON_SERVER_CALLBACKS, *PVBOXCRCON_SERVER_CALLBACKS;
/** Callback table the client (Main) exposes to the CrOpenGL server. */
typedef struct VBOXCRCON_CLIENT_CALLBACKS
{
HVBOXCRCON_CLIENT hClient;
PFNVBOXCRCON_CLT_CRCMD_COMPLETE pfnCrCmdComplete;
PFNVBOXCRCON_CLT_CRCTL_COMPLETE pfnCrCtlComplete;
PFNVBOXCRCON_3DRGN_ONSUBMIT pfn3DRgnOnSubmit;
PFNVBOXCRCON_3DRGN_BEGIN pfn3DRgnBegin;
PFNVBOXCRCON_3DRGN_REPORT pfn3DRgnReport;
PFNVBOXCRCON_3DRGN_END pfn3DRgnEnd;
} VBOXCRCON_CLIENT_CALLBACKS, *PVBOXCRCON_CLIENT_CALLBACKS;
/* issued by Main to establish connection between Main and CrOpenGL service */
typedef struct VBOXVDMACMD_CHROMIUM_CTL_CRCONNECT
{
VBOXVDMACMD_CHROMIUM_CTL Hdr;
/*input (filled by Client) :*/
/*class VMMDev*/void *pVMMDev;
VBOXCRCON_CLIENT_CALLBACKS ClientCallbacks;
/*output (filled by Server) :*/
VBOXCRCON_SERVER_CALLBACKS ServerCallbacks;
} VBOXVDMACMD_CHROMIUM_CTL_CRCONNECT, *PVBOXVDMACMD_CHROMIUM_CTL_CRCONNECT;
/* ring command buffer dr */
/* VBOXCMDVBVA_HDR::u8State values. */
#define VBOXCMDVBVA_STATE_SUBMITTED 1
#define VBOXCMDVBVA_STATE_CANCELLED 2
#define VBOXCMDVBVA_STATE_IN_PROGRESS 3
/* the "completed" state is signalled via the ring buffer values */
/* VBOXCMDVBVA_HDR::u8OpCode values. */
/* CrHgsmi command */
#define VBOXCMDVBVA_OPTYPE_CRCMD 1
/* blit command that does blitting of allocations identified by VRAM offset or host id
 * for VRAM-offset ones the size and format are same as primary */
#define VBOXCMDVBVA_OPTYPE_BLT 2
/* flip */
#define VBOXCMDVBVA_OPTYPE_FLIP 3
/* ColorFill */
#define VBOXCMDVBVA_OPTYPE_CLRFILL 4
/* allocation paging transfer request */
#define VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER 5
/* allocation paging fill request */
#define VBOXCMDVBVA_OPTYPE_PAGING_FILL 6
/* same as VBOXCMDVBVA_OPTYPE_NOP, but contains VBOXCMDVBVA_HDR data */
#define VBOXCMDVBVA_OPTYPE_NOPCMD 7
/* actual command is stored in guest system memory */
#define VBOXCMDVBVA_OPTYPE_SYSMEMCMD 8
/* complex command - i.e. can contain multiple commands
 * i.e. the VBOXCMDVBVA_OPTYPE_COMPLEXCMD VBOXCMDVBVA_HDR is followed
 * by one or more VBOXCMDVBVA_HDR commands.
 * Each command's size is specified in its VBOXCMDVBVA_HDR's u32FenceID field */
#define VBOXCMDVBVA_OPTYPE_COMPLEXCMD 9
/* nop - is a one-bit command. The buffer size to skip is determined by VBVA buffer size */
#define VBOXCMDVBVA_OPTYPE_NOP 0x80
/* u8Flags flags */
/* transfer from RAM to Allocation */
#define VBOXCMDVBVA_OPF_PAGING_TRANSFER_IN 0x80
/* blit sub-type, stored in the low bits of u8Flags. */
#define VBOXCMDVBVA_OPF_BLT_TYPE_SAMEDIM_A8R8G8B8 0
#define VBOXCMDVBVA_OPF_BLT_TYPE_GENERIC_A8R8G8B8 1
#define VBOXCMDVBVA_OPF_BLT_TYPE_OFFPRIMSZFMT_OR_ID 2
#define VBOXCMDVBVA_OPF_BLT_TYPE_MASK 3
#define VBOXCMDVBVA_OPF_CLRFILL_TYPE_GENERIC_A8R8G8B8 0
#define VBOXCMDVBVA_OPF_CLRFILL_TYPE_MASK 1
/* blit direction is from first operand to second */
#define VBOXCMDVBVA_OPF_BLT_DIR_IN_2 0x10
/* operand 1 contains host id */
#define VBOXCMDVBVA_OPF_OPERAND1_ISID 0x20
/* operand 2 contains host id */
#define VBOXCMDVBVA_OPF_OPERAND2_ISID 0x40
/* primary hint id is src */
#define VBOXCMDVBVA_OPF_PRIMARY_HINT_SRC 0x80
/* trying to make the header as small as possible,
 * we'd have pretty few op codes actually, so 8bit is quite enough,
 * we will be able to extend it in any way. */
/** Common 8-byte header of every command in the VBVA command ring. */
typedef struct VBOXCMDVBVA_HDR
{
/* one VBOXCMDVBVA_OPTYPE_XXX, except NOP, see comments above */
uint8_t u8OpCode;
/* command-specific
 * VBOXCMDVBVA_OPTYPE_CRCMD - must be null
 * VBOXCMDVBVA_OPTYPE_BLT - OR-ed VBOXCMDVBVA_OPF_ALLOC_XXX flags
 * VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER - must be null
 * VBOXCMDVBVA_OPTYPE_PAGING_FILL - must be null
 * VBOXCMDVBVA_OPTYPE_NOPCMD - must be null
 * VBOXCMDVBVA_OPTYPE_NOP - not applicable (as the entire VBOXCMDVBVA_HDR is not valid) */
uint8_t u8Flags;
/* one of VBOXCMDVBVA_STATE_XXX*/
volatile uint8_t u8State;
union
{
/* result, 0 on success, otherwise contains the failure code TBD */
int8_t i8Result;
uint8_t u8PrimaryID;
} u;
union
{
/* complex command (VBOXCMDVBVA_OPTYPE_COMPLEXCMD) element data */
struct
{
/* command length */
uint16_t u16CbCmdHost;
/* guest-specific data, host expects it to be NULL */
uint16_t u16CbCmdGuest;
} complexCmdEl;
/* DXGK DDI fence ID */
uint32_t u32FenceID;
} u2;
} VBOXCMDVBVA_HDR;
/* Basic scalar types used by the VBVA command structures below. */
typedef uint32_t VBOXCMDVBVAOFFSET;   /* offset into VRAM */
typedef uint64_t VBOXCMDVBVAPHADDR;   /* guest physical address */
typedef uint32_t VBOXCMDVBVAPAGEIDX;  /* guest page index */
/** One buffer referenced by a CrHgsmi command. */
typedef struct VBOXCMDVBVA_CRCMD_BUFFER
{
uint32_t cbBuffer;
VBOXCMDVBVAOFFSET offBuffer;
} VBOXCMDVBVA_CRCMD_BUFFER;
/** CrHgsmi command payload: cBuffers buffer descriptors (declared [1],
 * allocated with trailing storage). */
typedef struct VBOXCMDVBVA_CRCMD_CMD
{
uint32_t cBuffers;
VBOXCMDVBVA_CRCMD_BUFFER aBuffers[1];
} VBOXCMDVBVA_CRCMD_CMD;
/** Complete VBOXCMDVBVA_OPTYPE_CRCMD command. */
typedef struct VBOXCMDVBVA_CRCMD
{
VBOXCMDVBVA_HDR Hdr;
VBOXCMDVBVA_CRCMD_CMD Cmd;
} VBOXCMDVBVA_CRCMD;
/** Identifies an allocation either by VRAM offset or by host id; which
 * member is valid is selected by the VBOXCMDVBVA_OPF_OPERANDx_ISID flags. */
typedef struct VBOXCMDVBVA_ALLOCINFO
{
union
{
VBOXCMDVBVAOFFSET offVRAM;
uint32_t id;
} u;
} VBOXCMDVBVA_ALLOCINFO;
/** Allocation info plus its pixel dimensions. */
typedef struct VBOXCMDVBVA_ALLOCDESC
{
VBOXCMDVBVA_ALLOCINFO Info;
uint16_t u16Width;
uint16_t u16Height;
} VBOXCMDVBVA_ALLOCDESC;
typedef struct VBOXCMDVBVA_RECT
{
/** Coordinates of affected rectangle. */
int16_t xLeft;
int16_t yTop;
int16_t xRight;
int16_t yBottom;
} VBOXCMDVBVA_RECT;
/** 16-bit signed 2D point. */
typedef struct VBOXCMDVBVA_POINT
{
int16_t x;
int16_t y;
} VBOXCMDVBVA_POINT;
/** Common header of all VBOXCMDVBVA_OPTYPE_BLT variants. */
typedef struct VBOXCMDVBVA_BLT_HDR
{
VBOXCMDVBVA_HDR Hdr;
VBOXCMDVBVA_POINT Pos;
} VBOXCMDVBVA_BLT_HDR;
typedef struct VBOXCMDVBVA_BLT_PRIMARY
{
VBOXCMDVBVA_BLT_HDR Hdr;
VBOXCMDVBVA_ALLOCINFO alloc;
/* the rects count is determined from the command size */
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_BLT_PRIMARY;
typedef struct VBOXCMDVBVA_BLT_PRIMARY_GENERIC_A8R8G8B8
{
VBOXCMDVBVA_BLT_HDR Hdr;
VBOXCMDVBVA_ALLOCDESC alloc;
/* the rects count is determined from the command size */
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_BLT_PRIMARY_GENERIC_A8R8G8B8;
typedef struct VBOXCMDVBVA_BLT_OFFPRIMSZFMT_OR_ID
{
VBOXCMDVBVA_BLT_HDR Hdr;
VBOXCMDVBVA_ALLOCINFO alloc;
uint32_t id;
/* the rects count is determined from the command size */
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_BLT_OFFPRIMSZFMT_OR_ID;
typedef struct VBOXCMDVBVA_BLT_SAMEDIM_A8R8G8B8
{
VBOXCMDVBVA_BLT_HDR Hdr;
VBOXCMDVBVA_ALLOCDESC alloc1;
VBOXCMDVBVA_ALLOCINFO info2;
/* the rects count is determined from the command size */
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_BLT_SAMEDIM_A8R8G8B8;
typedef struct VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8
{
VBOXCMDVBVA_BLT_HDR Hdr;
VBOXCMDVBVA_ALLOCDESC alloc1;
VBOXCMDVBVA_ALLOCDESC alloc2;
/* the rects count is determined from the command size */
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8;
/** Largest of the blit command structures above. */
#define VBOXCMDVBVA_SIZEOF_BLTSTRUCT_MAX (sizeof (VBOXCMDVBVA_BLT_GENERIC_A8R8G8B8))
/** VBOXCMDVBVA_OPTYPE_FLIP command. */
typedef struct VBOXCMDVBVA_FLIP
{
VBOXCMDVBVA_HDR Hdr;
VBOXCMDVBVA_ALLOCINFO src;
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_FLIP;
#define VBOXCMDVBVA_SIZEOF_FLIPSTRUCT_MIN (RT_OFFSETOF(VBOXCMDVBVA_FLIP, aRects))
/** Common header of all VBOXCMDVBVA_OPTYPE_CLRFILL variants. */
typedef struct VBOXCMDVBVA_CLRFILL_HDR
{
VBOXCMDVBVA_HDR Hdr;
uint32_t u32Color;
} VBOXCMDVBVA_CLRFILL_HDR;
typedef struct VBOXCMDVBVA_CLRFILL_PRIMARY
{
VBOXCMDVBVA_CLRFILL_HDR Hdr;
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_CLRFILL_PRIMARY;
typedef struct VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8
{
VBOXCMDVBVA_CLRFILL_HDR Hdr;
VBOXCMDVBVA_ALLOCDESC dst;
VBOXCMDVBVA_RECT aRects[1];
} VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8;
/** Largest of the color-fill command structures above. */
#define VBOXCMDVBVA_SIZEOF_CLRFILLSTRUCT_MAX (sizeof (VBOXCMDVBVA_CLRFILL_GENERIC_A8R8G8B8))
#if 0
#define VBOXCMDVBVA_SYSMEMEL_CPAGES_MAX 0x1000
typedef struct VBOXCMDVBVA_SYSMEMEL
{
uint32_t cPagesAfterFirst : 12;
uint32_t iPage1 : 20;
uint32_t iPage2;
} VBOXCMDVBVA_SYSMEMEL;
#endif
/** Body of VBOXCMDVBVA_OPTYPE_PAGING_TRANSFER; aPageNumbers length is
 * implied by the command size (declared [1], allocated with trailing
 * storage). */
typedef struct VBOXCMDVBVA_PAGING_TRANSFER_DATA
{
/* for now can only contain offVRAM.
 * paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
VBOXCMDVBVA_ALLOCINFO Alloc;
VBOXCMDVBVAPAGEIDX aPageNumbers[1];
} VBOXCMDVBVA_PAGING_TRANSFER_DATA;
typedef struct VBOXCMDVBVA_PAGING_TRANSFER
{
VBOXCMDVBVA_HDR Hdr;
VBOXCMDVBVA_PAGING_TRANSFER_DATA Data;
} VBOXCMDVBVA_PAGING_TRANSFER;
/** VBOXCMDVBVA_OPTYPE_PAGING_FILL command. */
typedef struct VBOXCMDVBVA_PAGING_FILL
{
VBOXCMDVBVA_HDR Hdr;
uint32_t u32CbFill;
uint32_t u32Pattern;
/* paging transfer can NOT be initiated for allocations having host 3D object (hostID) associated */
VBOXCMDVBVAOFFSET offVRAM;
} VBOXCMDVBVA_PAGING_FILL;
/** VBOXCMDVBVA_OPTYPE_SYSMEMCMD command: the real command lives in guest
 * system memory at physical address phCmd. */
typedef struct VBOXCMDVBVA_SYSMEMCMD
{
VBOXCMDVBVA_HDR Hdr;
VBOXCMDVBVAPHADDR phCmd;
} VBOXCMDVBVA_SYSMEMCMD;
/* VBOXCMDVBVA_CTL::u32Type values. */
#define VBOXCMDVBVACTL_TYPE_ENABLE 1
#define VBOXCMDVBVACTL_TYPE_3DCTL 2
#define VBOXCMDVBVACTL_TYPE_RESIZE 3
/** Common header of VBVA control requests; i32Result receives the host result. */
typedef struct VBOXCMDVBVA_CTL
{
uint32_t u32Type;
int32_t i32Result;
} VBOXCMDVBVA_CTL;
/** VBOXCMDVBVACTL_TYPE_ENABLE request. */
typedef struct VBOXCMDVBVA_CTL_ENABLE
{
VBOXCMDVBVA_CTL Hdr;
VBVAENABLE Enable;
} VBOXCMDVBVA_CTL_ENABLE;
/* Bitmap over VBOX_VIDEO_MAX_SCREENS, stored as an array of _elType. */
#define VBOXCMDVBVA_SCREENMAP_SIZE(_elType) ((VBOX_VIDEO_MAX_SCREENS + sizeof (_elType) - 1) / sizeof (_elType))
#define VBOXCMDVBVA_SCREENMAP_DECL(_elType, _name) _elType _name[VBOXCMDVBVA_SCREENMAP_SIZE(_elType)]
/** One entry of a resize request: new screen geometry plus the map of
 * targets it applies to. */
typedef struct VBOXCMDVBVA_RESIZE_ENTRY
{
VBVAINFOSCREEN Screen;
VBOXCMDVBVA_SCREENMAP_DECL(uint32_t, aTargetMap);
} VBOXCMDVBVA_RESIZE_ENTRY;
/** Resize payload: entry count is implied by the request size. */
typedef struct VBOXCMDVBVA_RESIZE
{
VBOXCMDVBVA_RESIZE_ENTRY aEntries[1];
} VBOXCMDVBVA_RESIZE;
/** VBOXCMDVBVACTL_TYPE_RESIZE request. */
typedef struct VBOXCMDVBVA_CTL_RESIZE
{
VBOXCMDVBVA_CTL Hdr;
VBOXCMDVBVA_RESIZE Resize;
} VBOXCMDVBVA_CTL_RESIZE;
/* VBOXCMDVBVA_3DCTL::u32Type values. */
#define VBOXCMDVBVA3DCTL_TYPE_CONNECT 1
#define VBOXCMDVBVA3DCTL_TYPE_DISCONNECT 2
#define VBOXCMDVBVA3DCTL_TYPE_CMD 3
/** Common header of 3D control sub-requests. */
typedef struct VBOXCMDVBVA_3DCTL
{
uint32_t u32Type;
uint32_t u32CmdClientId;
} VBOXCMDVBVA_3DCTL;
/** VBOXCMDVBVA3DCTL_TYPE_CONNECT payload. */
typedef struct VBOXCMDVBVA_3DCTL_CONNECT
{
VBOXCMDVBVA_3DCTL Hdr;
uint32_t u32MajorVersion;
uint32_t u32MinorVersion;
uint64_t u64Pid;
} VBOXCMDVBVA_3DCTL_CONNECT;
/** VBOXCMDVBVA3DCTL_TYPE_CMD payload. */
typedef struct VBOXCMDVBVA_3DCTL_CMD
{
VBOXCMDVBVA_3DCTL Hdr;
VBOXCMDVBVA_HDR Cmd;
} VBOXCMDVBVA_3DCTL_CMD;
typedef struct VBOXCMDVBVA_CTL_3DCTL_CMD
{
VBOXCMDVBVA_CTL Hdr;
VBOXCMDVBVA_3DCTL_CMD Cmd;
} VBOXCMDVBVA_CTL_3DCTL_CMD;
typedef struct VBOXCMDVBVA_CTL_3DCTL_CONNECT
{
VBOXCMDVBVA_CTL Hdr;
VBOXCMDVBVA_3DCTL_CONNECT Connect;
} VBOXCMDVBVA_CTL_3DCTL_CONNECT;
typedef struct VBOXCMDVBVA_CTL_3DCTL
{
VBOXCMDVBVA_CTL Hdr;
VBOXCMDVBVA_3DCTL Ctl;
} VBOXCMDVBVA_CTL_3DCTL;
#pragma pack()
#ifdef VBOXVDMA_WITH_VBVA
# pragma pack(1)
/** VDMA command submitted through the VBVA channel: just the HGSMI offset
 * of the actual command. */
typedef struct VBOXVDMAVBVACMD
{
HGSMIOFFSET offCmd;
} VBOXVDMAVBVACMD;
#pragma pack()
#endif
#endif
/** @file
*
* VBox Host Guest Shared Memory Interface (HGSMI).
* OS-independent guest structures.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef __HGSMI_GUEST_h__
#define __HGSMI_GUEST_h__
#include <VBox/HGSMI/HGSMI.h>
#include <VBox/HGSMI/HGSMIChSetup.h>
#include <VBox/VBoxVideo.h>
#ifdef VBOX_XPDM_MINIPORT
RT_C_DECLS_BEGIN
# include "miniport.h"
# include "ntddvdeo.h"
# include <Video.h>
RT_C_DECLS_END
#elif defined VBOX_GUESTR3XORGMOD
# include <compiler.h>
#else
# include <iprt/asm-amd64-x86.h>
#endif
/* Select the guest command heap implementation: the WDDM miniport wraps the
 * heap in a VBOXSHGSMI, every other target uses a plain HGSMIHEAP. */
#ifdef VBOX_WDDM_MINIPORT
# include "wddm/VBoxMPShgsmi.h"
typedef VBOXSHGSMI HGSMIGUESTCMDHEAP;
# define HGSMIGUESTCMDHEAP_GET(_p) (&(_p)->Heap)
#else
typedef HGSMIHEAP HGSMIGUESTCMDHEAP;
# define HGSMIGUESTCMDHEAP_GET(_p) (_p)
#endif
RT_C_DECLS_BEGIN
/**
 * Structure grouping the context needed for submitting commands to the host
 * via HGSMI
 */
typedef struct HGSMIGUESTCOMMANDCONTEXT
{
/** Information about the memory heap located in VRAM from which data
 * structures to be sent to the host are allocated. */
HGSMIGUESTCMDHEAP heapCtx;
/** The I/O port used for submitting commands to the host by writing their
 * offsets into the heap. */
RTIOPORT port;
} HGSMIGUESTCOMMANDCONTEXT, *PHGSMIGUESTCOMMANDCONTEXT;
/**
 * Structure grouping the context needed for receiving commands from the host
 * via HGSMI
 */
typedef struct HGSMIHOSTCOMMANDCONTEXT
{
/** Information about the memory area located in VRAM in which the host
 * places data structures to be read by the guest. */
HGSMIAREA areaCtx;
/** Convenience structure used for matching host commands to handlers. */
/** @todo handlers are registered individually in code rather than just
 * passing a static structure in order to gain extra flexibility. There is
 * currently no expected usage case for this though. Is the additional
 * complexity really justified? */
HGSMICHANNELINFO channels;
/** Flag to indicate that one thread is currently processing the command
 * queue. */
volatile bool fHostCmdProcessing;
/* Pointer to the VRAM location where the HGSMI host flags are kept. */
volatile HGSMIHOSTFLAGS *pfHostFlags;
/** The I/O port used for receiving commands from the host as offsets into
 * the memory area and sending back confirmations (command completion,
 * IRQ acknowledgement). */
RTIOPORT port;
} HGSMIHOSTCOMMANDCONTEXT, *PHGSMIHOSTCOMMANDCONTEXT;
/**
 * Structure grouping the context needed for sending graphics acceleration
 * information to the host via VBVA. Each screen has its own VBVA buffer.
 */
typedef struct VBVABUFFERCONTEXT
{
/** Offset of the buffer in the VRAM section for the screen */
uint32_t offVRAMBuffer;
/** Length of the buffer in bytes */
uint32_t cbBuffer;
/** This flag is set if we wrote to the buffer faster than the host could
 * read it. */
bool fHwBufferOverflow;
/** The VBVA record that we are currently preparing for the host, NULL if
 * none. */
struct VBVARECORD *pRecord;
/** Pointer to the VBVA buffer mapped into the current address space. Will
 * be NULL if VBVA is not enabled. */
struct VBVABUFFER *pVBVA;
} VBVABUFFERCONTEXT, *PVBVABUFFERCONTEXT;
/** @name Helper functions
* @{ */
/** Write an 8-bit value to an I/O port.
 * @param Port   the I/O port to write to
 * @param Value  the 8-bit value to write */
DECLINLINE(void) VBoxVideoCmnPortWriteUchar(RTIOPORT Port, uint8_t Value)
{
#ifdef VBOX_XPDM_MINIPORT
/* Windows XPDM miniport build: go through the VideoPort API. */
VideoPortWritePortUchar((PUCHAR)Port, Value);
#elif defined VBOX_GUESTR3XORGMOD
/* X.Org user-mode build: direct port I/O. */
outb(Port, Value);
#else /** @todo make these explicit */
/* Default: IPRT inline-assembly wrapper. */
ASMOutU8(Port, Value);
#endif
}
/** Write a 16-bit value to an I/O port.
 * @param Port   the I/O port to write to
 * @param Value  the 16-bit value to write */
DECLINLINE(void) VBoxVideoCmnPortWriteUshort(RTIOPORT Port, uint16_t Value)
{
#ifdef VBOX_XPDM_MINIPORT
VideoPortWritePortUshort((PUSHORT)Port,Value);
#elif defined VBOX_GUESTR3XORGMOD
outw(Port, Value);
#else
ASMOutU16(Port, Value);
#endif
}
/** Write a 32-bit value to an I/O port.
 * @param Port   the I/O port to write to
 * @param Value  the 32-bit value to write */
DECLINLINE(void) VBoxVideoCmnPortWriteUlong(RTIOPORT Port, uint32_t Value)
{
#ifdef VBOX_XPDM_MINIPORT
VideoPortWritePortUlong((PULONG)Port,Value);
#elif defined VBOX_GUESTR3XORGMOD
outl(Port, Value);
#else
ASMOutU32(Port, Value);
#endif
}
/** Read an 8-bit value from an I/O port.
 * @param Port  the I/O port to read from
 * @returns the 8-bit value read */
DECLINLINE(uint8_t) VBoxVideoCmnPortReadUchar(RTIOPORT Port)
{
#ifdef VBOX_XPDM_MINIPORT
/* Windows XPDM miniport build: go through the VideoPort API. */
return VideoPortReadPortUchar((PUCHAR)Port);
#elif defined VBOX_GUESTR3XORGMOD
/* X.Org user-mode build: direct port I/O. */
return inb(Port);
#else
/* Default: IPRT inline-assembly wrapper. */
return ASMInU8(Port);
#endif
}
/** Read a 16-bit value from an I/O port.
 * @param Port  the I/O port to read from
 * @returns the 16-bit value read */
DECLINLINE(uint16_t) VBoxVideoCmnPortReadUshort(RTIOPORT Port)
{
#ifdef VBOX_XPDM_MINIPORT
return VideoPortReadPortUshort((PUSHORT)Port);
#elif defined VBOX_GUESTR3XORGMOD
return inw(Port);
#else
return ASMInU16(Port);
#endif
}
/** Read a 32-bit value from an I/O port.
 * @param Port  the I/O port to read from
 * @returns the 32-bit value read */
DECLINLINE(uint32_t) VBoxVideoCmnPortReadUlong(RTIOPORT Port)
{
#ifdef VBOX_XPDM_MINIPORT
return VideoPortReadPortUlong((PULONG)Port);
#elif defined VBOX_GUESTR3XORGMOD
return inl(Port);
#else
return ASMInU32(Port);
#endif
}
/** @} */
/** @name Base HGSMI APIs
* @{ */
/** Acknowledge an IRQ by writing HGSMIOFFSET_VOID to the host command port. */
DECLINLINE(void) VBoxHGSMIClearIrq(PHGSMIHOSTCOMMANDCONTEXT pCtx)
{
VBoxVideoCmnPortWriteUlong(pCtx->port, HGSMIOFFSET_VOID);
}
RTDECL(void) VBoxHGSMIHostCmdComplete(PHGSMIHOSTCOMMANDCONTEXT pCtx,
void *pvMem);
RTDECL(void) VBoxHGSMIProcessHostQueue(PHGSMIHOSTCOMMANDCONTEXT pCtx);
RTDECL(bool) VBoxHGSMIIsSupported(void);
RTDECL(void *) VBoxHGSMIBufferAlloc(PHGSMIGUESTCOMMANDCONTEXT pCtx,
HGSMISIZE cbData,
uint8_t u8Ch,
uint16_t u16Op);
RTDECL(void) VBoxHGSMIBufferFree(PHGSMIGUESTCOMMANDCONTEXT pCtx,
void *pvBuffer);
RTDECL(int) VBoxHGSMIBufferSubmit(PHGSMIGUESTCOMMANDCONTEXT pCtx,
void *pvBuffer);
RTDECL(void) VBoxHGSMIGetBaseMappingInfo(uint32_t cbVRAM,
uint32_t *poffVRAMBaseMapping,
uint32_t *pcbMapping,
uint32_t *poffGuestHeapMemory,
uint32_t *pcbGuestHeapMemory,
uint32_t *poffHostFlags);
RTDECL(int) VBoxHGSMIReportFlagsLocation(PHGSMIGUESTCOMMANDCONTEXT pCtx,
HGSMIOFFSET offLocation);
RTDECL(int) VBoxHGSMISendCapsInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t fCaps);
/** @todo we should provide a cleanup function too as part of the API */
RTDECL(int) VBoxHGSMISetupGuestContext(PHGSMIGUESTCOMMANDCONTEXT pCtx,
void *pvGuestHeapMemory,
uint32_t cbGuestHeapMemory,
uint32_t offVRAMGuestHeapMemory,
const HGSMIENV *pEnv);
RTDECL(void) VBoxHGSMIGetHostAreaMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t cbVRAM,
uint32_t offVRAMBaseMapping,
uint32_t *poffVRAMHostArea,
uint32_t *pcbHostArea);
RTDECL(void) VBoxHGSMISetupHostContext(PHGSMIHOSTCOMMANDCONTEXT pCtx,
void *pvBaseMapping,
uint32_t offHostFlags,
void *pvHostAreaMapping,
uint32_t offVRAMHostArea,
uint32_t cbHostArea);
RTDECL(int) VBoxHGSMISendHostCtxInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
HGSMIOFFSET offVRAMFlagsLocation,
uint32_t fCaps,
uint32_t offVRAMHostArea,
uint32_t cbHostArea);
RTDECL(int) VBoxQueryConfHGSMI(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t u32Index, uint32_t *pulValue);
RTDECL(int) VBoxQueryConfHGSMIDef(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t u32Index, uint32_t u32DefValue, uint32_t *pulValue);
RTDECL(int) VBoxHGSMIUpdatePointerShape(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t fFlags,
uint32_t cHotX,
uint32_t cHotY,
uint32_t cWidth,
uint32_t cHeight,
uint8_t *pPixels,
uint32_t cbLength);
RTDECL(int) VBoxHGSMICursorPosition(PHGSMIGUESTCOMMANDCONTEXT pCtx, bool fReportPosition, uint32_t x, uint32_t y,
uint32_t *pxHost, uint32_t *pyHost);
/** @} */
/** @name VBVA APIs
* @{ */
RTDECL(bool) VBoxVBVAEnable(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
struct VBVABUFFER *pVBVA, int32_t cScreen);
RTDECL(void) VBoxVBVADisable(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
int32_t cScreen);
RTDECL(bool) VBoxVBVABufferBeginUpdate(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx);
RTDECL(void) VBoxVBVABufferEndUpdate(PVBVABUFFERCONTEXT pCtx);
RTDECL(bool) VBoxVBVAWrite(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
const void *pv, uint32_t cb);
RTDECL(bool) VBoxVBVAOrderSupported(PVBVABUFFERCONTEXT pCtx, unsigned code);
RTDECL(void) VBoxVBVASetupBufferContext(PVBVABUFFERCONTEXT pCtx,
uint32_t offVRAMBuffer,
uint32_t cbBuffer);
/** @} */
/** @name Modesetting APIs
* @{ */
RTDECL(uint32_t) VBoxHGSMIGetMonitorCount(PHGSMIGUESTCOMMANDCONTEXT pCtx);
RTDECL(uint32_t) VBoxVideoGetVRAMSize(void);
RTDECL(bool) VBoxVideoAnyWidthAllowed(void);
RTDECL(uint16_t) VBoxHGSMIGetScreenFlags(PHGSMIGUESTCOMMANDCONTEXT pCtx);
struct VBVAINFOVIEW;
/**
 * Callback function called from @a VBoxHGSMISendViewInfo to initialise
 * the @a VBVAINFOVIEW structure for each screen.
 *
 * @returns iprt status code
 * @param pvData context data for the callback, passed to @a
 * VBoxHGSMISendViewInfo along with the callback
 * @param pInfo array of @a VBVAINFOVIEW structures to be filled in
 * @param cViews presumably the number of elements in the @a pInfo array —
 * see the @todo below; confirm against the caller
 * @todo explicitly pass the array size
 */
typedef DECLCALLBACK(int) FNHGSMIFILLVIEWINFO(void *pvData,
struct VBVAINFOVIEW *pInfo,
uint32_t cViews);
/** Pointer to a FNHGSMIFILLVIEWINFO callback */
typedef FNHGSMIFILLVIEWINFO *PFNHGSMIFILLVIEWINFO;
RTDECL(int) VBoxHGSMISendViewInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t u32Count,
PFNHGSMIFILLVIEWINFO pfnFill,
void *pvData);
RTDECL(void) VBoxVideoSetModeRegisters(uint16_t cWidth, uint16_t cHeight,
uint16_t cVirtWidth, uint16_t cBPP,
uint16_t fFlags,
uint16_t cx, uint16_t cy);
RTDECL(bool) VBoxVideoGetModeRegisters(uint16_t *pcWidth,
uint16_t *pcHeight,
uint16_t *pcVirtWidth,
uint16_t *pcBPP,
uint16_t *pfFlags);
RTDECL(void) VBoxVideoDisableVBE(void);
RTDECL(void) VBoxHGSMIProcessDisplayInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
uint32_t cDisplay,
int32_t cOriginX,
int32_t cOriginY,
uint32_t offStart,
uint32_t cbPitch,
uint32_t cWidth,
uint32_t cHeight,
uint16_t cBPP,
uint16_t fFlags);
RTDECL(int) VBoxHGSMIUpdateInputMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx, int32_t cOriginX, int32_t cOriginY,
uint32_t cWidth, uint32_t cHeight);
RTDECL(int) VBoxHGSMIGetModeHints(PHGSMIGUESTCOMMANDCONTEXT pCtx,
unsigned cScreens, VBVAMODEHINT *paHints);
/** @} */
RT_C_DECLS_END
#endif /* __HGSMI_GUEST_h__*/
......@@ -674,7 +674,7 @@ RT_C_DECLS_END
/** @def AssertBreak
* Assert that an expression is true and breaks if it isn't.
* In RT_STRICT mode it will hit a breakpoint before returning.
* In RT_STRICT mode it will hit a breakpoint before breaking.
*
* @param expr Expression which should be true.
*/
......@@ -696,6 +696,30 @@ RT_C_DECLS_END
break
#endif
/** @def AssertContinue
 * Assert that an expression is true and continue if it isn't.
 * In RT_STRICT mode it will hit a breakpoint before continuing.
 *
 * @param expr Expression which should be true.
 * @remarks May only be used inside a loop body, since it expands to a
 * 'continue' statement. The trailing 'else do {} while (0)' arm
 * looks like it is there so the macro consumes the semicolon the
 * caller writes after it and nests safely in if/else — confirm
 * against the sibling AssertBreak macro.
 */
#ifdef RT_STRICT
# define AssertContinue(expr) \
if (RT_LIKELY(!!(expr))) \
{ /* likely */ } \
else if (1) \
{ \
RTAssertMsg1Weak(#expr, __LINE__, __FILE__, __PRETTY_FUNCTION__); \
RTAssertPanic(); \
continue; \
} else do {} while (0)
#else
# define AssertContinue(expr) \
if (RT_LIKELY(!!(expr))) \
{ /* likely */ } \
else \
continue
#endif
/** @def AssertBreakStmt
* Assert that an expression is true and breaks if it isn't.
* In RT_STRICT mode it will hit a breakpoint before doing break.
......
......@@ -73,6 +73,8 @@ DECL_FORCE_INLINE(bool) RTLocCIsBlank(int ch)
*
* @returns true / false.
* @param ch The character to test.
*
* @note Will return true if ch is '\0'!
*/
DECL_FORCE_INLINE(bool) RTLocCIsCntrl(int ch)
{
......
......@@ -1452,6 +1452,8 @@ RT_C_DECLS_END
#define VERR_LDRPE_LOAD_CONFIG_SIZE (-626)
/** The PE loader encountered a lock prefix table, a feature which hasn't been implemented yet. */
#define VERR_LDRPE_LOCK_PREFIX_TABLE (-627)
/** The PE loader encountered some Guard CF stuff in the load config. */
#define VERR_LDRPE_GUARD_CF_STUFF (-628)
/** The ELF loader doesn't handle foreign endianness. */
#define VERR_LDRELF_ODD_ENDIAN (-630)
/** The ELF image is 'dynamic', the ELF loader can only deal with 'relocatable' images at present. */
......
......@@ -1878,18 +1878,21 @@
# define RTUriParsedQuery RT_MANGLER(RTUriParsedQuery)
# define RTUriIsSchemeMatch RT_MANGLER(RTUriIsSchemeMatch)
# define RTUtf16AllocTag RT_MANGLER(RTUtf16AllocTag)
# define RTUtf16ReallocTag RT_MANGLER(RTUtf16ReallocTag)
# define RTUtf16CalcLatin1Len RT_MANGLER(RTUtf16CalcLatin1Len)
# define RTUtf16CalcLatin1LenEx RT_MANGLER(RTUtf16CalcLatin1LenEx)
# define RTUtf16CalcUtf8Len RT_MANGLER(RTUtf16CalcUtf8Len)
# define RTUtf16CalcUtf8LenEx RT_MANGLER(RTUtf16CalcUtf8LenEx)
# define RTUtf16Cmp RT_MANGLER(RTUtf16Cmp)
# define RTUtf16CmpAscii RT_MANGLER(RTUtf16CmpAscii)
# define RTUtf16CmpUtf8 RT_MANGLER(RTUtf16CmpUtf8)
# define RTUtf16DupExTag RT_MANGLER(RTUtf16DupExTag)
# define RTUtf16DupTag RT_MANGLER(RTUtf16DupTag)
# define RTUtf16Free RT_MANGLER(RTUtf16Free)
# define RTUtf16GetCpExInternal RT_MANGLER(RTUtf16GetCpExInternal)
# define RTUtf16GetCpInternal RT_MANGLER(RTUtf16GetCpInternal)
# define RTUtf16ICmp RT_MANGLER(RTUtf16ICmp)
# define RTUtf16ICmpUtf8 RT_MANGLER(RTUtf16ICmpUtf8)
# define RTUtf16IsValidEncoding RT_MANGLER(RTUtf16IsValidEncoding)
# define RTUtf16Len RT_MANGLER(RTUtf16Len)
# define RTUtf16LocaleICmp RT_MANGLER(RTUtf16LocaleICmp)
......
......@@ -81,6 +81,78 @@ RT_C_DECLS_BEGIN
*/
RTDECL(PRTUTF16) RTUtf16AllocTag(size_t cb, const char *pszTag);
/**
* Reallocates the specified UTF-16 string (default tag).
*
* You should normally not use this function, except if there is some very
* custom string handling you need doing that isn't covered by any of the other
* APIs.
*
* @returns VINF_SUCCESS.
* @retval VERR_NO_UTF16_MEMORY if we failed to reallocate the string, @a
* *ppwsz remains unchanged.
*
* @param ppwsz Pointer to the string variable containing the
* input and output string.
*
* When not freeing the string, the result will
* always have the last RTUTF16 set to the
* terminator character so that when used for
* string truncation the result will be a valid
* C-style string (your job to keep it a valid
* UTF-16 string).
*
* When the input string is NULL and we're supposed
* to reallocate, the returned string will also
* have the first RTUTF16 set to the terminator
* char so it will be a valid C-style string.
*
* @param cbNew When @a cbNew is zero, we'll behave like
* RTUtf16Free and @a *ppwsz will be set to NULL.
*
* When not zero, this will be rounded up to a
* multiple of two, and used as the new size of the
* memory backing the string, i.e. it includes the
* terminator (RTUTF16) char.
*/
#define RTUtf16Realloc(ppwsz, cb) RTUtf16ReallocTag((ppwsz), (cb), RTSTR_TAG)
/**
* Reallocates the specified UTF-16 string (custom tag).
*
* You should normally not use this function, except if there is some very
* custom string handling you need doing that isn't covered by any of the other
* APIs.
*
* @returns VINF_SUCCESS.
* @retval VERR_NO_UTF16_MEMORY if we failed to reallocate the string, @a
* *ppwsz remains unchanged.
*
* @param ppwsz Pointer to the string variable containing the
* input and output string.
*
* When not freeing the string, the result will
* always have the last RTUTF16 set to the
* terminator character so that when used for
* string truncation the result will be a valid
* C-style string (your job to keep it a valid
* UTF-16 string).
*
* When the input string is NULL and we're supposed
* to reallocate, the returned string will also
* have the first RTUTF16 set to the terminator
* char so it will be a valid C-style string.
*
* @param cbNew When @a cbNew is zero, we'll behave like
* RTUtf16Free and @a *ppwsz will be set to NULL.
*
* When not zero, this will be rounded up to a
* multiple of two, and used as the new size of the
* memory backing the string, i.e. it includes the
* terminator (RTUTF16) char.
* @param pszTag Allocation tag used for statistics and such.
*/
RTDECL(int) RTUtf16ReallocTag(PRTUTF16 *ppwsz, size_t cbNew, const char *pszTag);
/**
* Free a UTF-16 string allocated by RTStrToUtf16(), RTStrToUtf16Ex(),
......@@ -331,6 +403,19 @@ RTDECL(int) RTUtf16Cmp(PCRTUTF16 pwsz1, PCRTUTF16 pwsz2);
*/
RTDECL(int) RTUtf16CmpAscii(PCRTUTF16 pwsz1, const char *psz2);
/**
* Performs a case sensitive string compare between an UTF-16 string and a UTF-8
* string.
*
* @returns < 0 if the first string is less than the second string.
* @returns 0 if the first string is identical to the second string.
* @returns > 0 if the first string is greater than the second string.
* @param pwsz1 First UTF-16 string. Null is allowed.
* @param psz2 Second string, UTF-8. Null is allowed.
* @remarks NULL and empty strings are treated equally.
*/
RTDECL(int) RTUtf16CmpUtf8(PCRTUTF16 pwsz1, const char *psz2);
/**
* Performs a case insensitive string compare between two UTF-16 strings.
*
......@@ -346,6 +431,19 @@ RTDECL(int) RTUtf16CmpAscii(PCRTUTF16 pwsz1, const char *psz2);
*/
RTDECL(int) RTUtf16ICmp(PCRTUTF16 pwsz1, PCRTUTF16 pwsz2);
/**
* Performs a case insensitive string compare between an UTF-16 string and a
* UTF-8 string.
*
* @returns < 0 if the first string is less than the second string.
* @returns 0 if the first string is identical to the second string.
* @returns > 0 if the first string is greater than the second string.
* @param pwsz1 First UTF-16 string. Null is allowed.
* @param psz2 Second string, UTF-8. Null is allowed.
* @remarks NULL and empty strings are treated equally.
*/
RTDECL(int) RTUtf16ICmpUtf8(PCRTUTF16 pwsz1, const char *psz2);
/**
* Performs a case insensitive string compare between an UTF-16 string and an
* pure ASCII string.
......
/* $Id: the-linux-kernel.h $ */
/** @file
* IPRT - Include all necessary headers for the Linux kernel.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
#ifndef ___the_linux_kernel_h
#define ___the_linux_kernel_h
/*
* Include iprt/types.h to install the bool wrappers.
* Then use the linux bool type for all the stuff include here.
*/
#include <iprt/types.h>
#define bool linux_bool
#include <linux/version.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)
# include <generated/autoconf.h>
#else
# ifndef AUTOCONF_INCLUDED
# include <linux/autoconf.h>
# endif
#endif
/* We only support 2.4 and 2.6 series kernels */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)
# error We only support 2.4 and 2.6 series kernels
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 0) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# error We only support 2.4 and 2.6 series kernels
#endif
#if defined(CONFIG_MODVERSIONS) && !defined(MODVERSIONS)
# define MODVERSIONS
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 71)
# include <linux/modversions.h>
# endif
#endif
#ifndef KBUILD_STR
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16)
# define KBUILD_STR(s) s
# else
# define KBUILD_STR(s) #s
# endif
#endif
# if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0)
# include <linux/kconfig.h> /* for macro IS_ENABLED */
# endif
#include <linux/string.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)
# include <linux/semaphore.h>
#else /* older kernels */
# include <asm/semaphore.h>
#endif /* older kernels */
#include <linux/module.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# include <linux/moduleparam.h>
#endif
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/fs.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# include <linux/namei.h>
#endif
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
# include <linux/sched/rt.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 7)
# include <linux/jiffies.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 16)
# include <linux/ktime.h>
# include <linux/hrtimer.h>
#endif
#include <linux/wait.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 71)
# include <linux/cpu.h>
# include <linux/notifier.h>
#endif
/* For the basic additions module */
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/completion.h>
#include <linux/compiler.h>
#ifndef HAVE_UNLOCKED_IOCTL /* linux/fs.h defines this */
# include <linux/smp_lock.h>
#endif
/* For the shared folders module */
#include <linux/vmalloc.h>
#define wchar_t linux_wchar_t
#include <linux/nls.h>
#undef wchar_t
#include <asm/mman.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/div64.h>
/* For thread-context hooks. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) && defined(CONFIG_PREEMPT_NOTIFIERS)
# include <linux/preempt.h>
#endif
/* for workqueue / task queues. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
# include <linux/workqueue.h>
#else
# include <linux/tqueue.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
# include <linux/kthread.h>
#endif
/* for cr4_init_shadow() / cpu_tlbstate. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 20, 0)
# include <asm/tlbflush.h>
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)
# include <asm/smap.h>
#else
static inline void clac(void) { }
static inline void stac(void) { }
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# ifndef page_to_pfn
# define page_to_pfn(page) ((page) - mem_map)
# endif
#endif
#ifndef DEFINE_WAIT
# define DEFINE_WAIT(name) DECLARE_WAITQUEUE(name, current)
#endif
#ifndef __GFP_NOWARN
# define __GFP_NOWARN 0
#endif
/*
* 2.4 / early 2.6 compatibility wrappers
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7)
# ifndef MAX_JIFFY_OFFSET
# define MAX_JIFFY_OFFSET ((~0UL >> 1)-1)
# endif
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 29) || LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)
/** Compatibility wrapper for old kernels lacking jiffies_to_msecs():
 * convert @a cJiffies scheduler ticks to milliseconds. The HZ > 1000
 * branch rounds up so a non-zero tick count never maps to 0 ms. */
DECLINLINE(unsigned int) jiffies_to_msecs(unsigned long cJiffies)
{
# if HZ <= 1000 && !(1000 % HZ)
return (1000 / HZ) * cJiffies;
# elif HZ > 1000 && !(HZ % 1000)
return (cJiffies + (HZ / 1000) - 1) / (HZ / 1000);
# else
return (cJiffies * 1000) / HZ;
# endif
}
/** Compatibility wrapper for old kernels lacking msecs_to_jiffies():
 * convert @a cMillies milliseconds to scheduler ticks, rounding up and
 * clamping to MAX_JIFFY_OFFSET when HZ > 1000 to avoid overflow. */
DECLINLINE(unsigned long) msecs_to_jiffies(unsigned int cMillies)
{
# if HZ > 1000
if (cMillies > jiffies_to_msecs(MAX_JIFFY_OFFSET))
return MAX_JIFFY_OFFSET;
# endif
# if HZ <= 1000 && !(1000 % HZ)
return (cMillies + (1000 / HZ) - 1) / (1000 / HZ);
# elif HZ > 1000 && !(HZ % 1000)
return cMillies * (HZ / 1000);
# else
return (cMillies * HZ + 999) / 1000;
# endif
}
# endif /* < 2.4.29 || >= 2.6.0 */
#endif /* < 2.6.7 */
/*
* 2.4 compatibility wrappers
*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# define prepare_to_wait(q, wait, state) \
do { \
add_wait_queue(q, wait); \
set_current_state(state); \
} while (0)
# define after_wait(wait) \
do { \
list_del_init(&(wait)->task_list); \
} while (0)
# define finish_wait(q, wait) \
do { \
set_current_state(TASK_RUNNING); \
remove_wait_queue(q, wait); \
} while (0)
#else /* >= 2.6.0 */
# define after_wait(wait) do {} while (0)
#endif /* >= 2.6.0 */
/** @def TICK_NSEC
* The time between ticks in nsec */
#ifndef TICK_NSEC
# define TICK_NSEC (1000000000UL / HZ)
#endif
/*
* This sucks soooo badly on x86! Why don't they export __PAGE_KERNEL_EXEC so PAGE_KERNEL_EXEC would be usable?
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) && defined(RT_ARCH_AMD64)
# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) && defined(PAGE_KERNEL_EXEC) && defined(CONFIG_X86_PAE)
# ifdef __PAGE_KERNEL_EXEC
/* >= 2.6.27 */
# define MY_PAGE_KERNEL_EXEC __pgprot(cpu_has_pge ? __PAGE_KERNEL_EXEC | _PAGE_GLOBAL : __PAGE_KERNEL_EXEC)
# else
# define MY_PAGE_KERNEL_EXEC __pgprot(cpu_has_pge ? _PAGE_KERNEL_EXEC | _PAGE_GLOBAL : _PAGE_KERNEL_EXEC)
# endif
#else
# define MY_PAGE_KERNEL_EXEC PAGE_KERNEL
#endif
/*
* The redhat hack section.
* - The current hacks are for 2.4.21-15.EL only.
*/
#ifndef NO_REDHAT_HACKS
/* accounting. */
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# ifdef VM_ACCOUNT
# define USE_RHEL4_MUNMAP
# endif
# endif
/* backported remap_page_range. */
# if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)
# include <asm/tlb.h>
# ifdef tlb_vma /* probably not good enough... */
# define HAVE_26_STYLE_REMAP_PAGE_RANGE 1
# endif
# endif
# ifndef RT_ARCH_AMD64
/* In 2.6.9-22.ELsmp we have to call change_page_attr() twice when changing
* the page attributes from PAGE_KERNEL to something else, because there appears
* to be a bug in one of the many patches that redhat applied.
* It should be safe to do this on less buggy linux kernels too. ;-)
*/
# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
do { \
if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) \
change_page_attr(pPages, cPages, prot); \
change_page_attr(pPages, cPages, prot); \
} while (0)
# endif /* !RT_ARCH_AMD64 */
#endif /* !NO_REDHAT_HACKS */
#ifndef MY_CHANGE_PAGE_ATTR
# ifdef RT_ARCH_AMD64 /** @todo This is a cheap hack, but it'll get around that 'else BUG();' in __change_page_attr(). */
# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) \
do { \
change_page_attr(pPages, cPages, PAGE_KERNEL_NOCACHE); \
change_page_attr(pPages, cPages, prot); \
} while (0)
# else
# define MY_CHANGE_PAGE_ATTR(pPages, cPages, prot) change_page_attr(pPages, cPages, prot)
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
# define MY_SET_PAGES_EXEC(pPages, cPages) set_pages_x(pPages, cPages)
# define MY_SET_PAGES_NOEXEC(pPages, cPages) set_pages_nx(pPages, cPages)
#else
# define MY_SET_PAGES_EXEC(pPages, cPages) \
do { \
if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL)) \
MY_CHANGE_PAGE_ATTR(pPages, cPages, MY_PAGE_KERNEL_EXEC); \
} while (0)
# define MY_SET_PAGES_NOEXEC(pPages, cPages) \
do { \
if (pgprot_val(MY_PAGE_KERNEL_EXEC) != pgprot_val(PAGE_KERNEL)) \
MY_CHANGE_PAGE_ATTR(pPages, cPages, PAGE_KERNEL); \
} while (0)
#endif
/** @def ONE_MSEC_IN_JIFFIES
* The number of jiffies that make up 1 millisecond. Must be at least 1! */
#if HZ <= 1000
# define ONE_MSEC_IN_JIFFIES 1
#elif !(HZ % 1000)
# define ONE_MSEC_IN_JIFFIES (HZ / 1000)
#else
# define ONE_MSEC_IN_JIFFIES ((HZ + 999) / 1000)
# error "HZ is not a multiple of 1000, the GIP stuff won't work right!"
#endif
/*
* Stop using the linux bool type.
*/
#undef bool
/*
* There are post-2.6.24 kernels (confusingly with unchanged version number)
* which eliminate macros which were marked as deprecated.
*/
#ifndef __attribute_used__
#define __attribute_used__ __used
#endif
/**
* Hack for shortening pointers on linux so we can stuff more stuff into the
* task_struct::comm field. This is used by the semaphore code but put here
* because we don't have any better place atm. Don't use outside IPRT, please.
*/
#ifdef RT_ARCH_AMD64
# define IPRT_DEBUG_SEMS_ADDRESS(addr) ( ((long)(addr) & (long)~UINT64_C(0xfffffff000000000)) )
#else
# define IPRT_DEBUG_SEMS_ADDRESS(addr) ( (long)(addr) )
#endif
/**
* Puts semaphore info into the task_struct::comm field if IPRT_DEBUG_SEMS is
* defined.
*/
#ifdef IPRT_DEBUG_SEMS
# define IPRT_DEBUG_SEMS_STATE(pThis, chState) \
snprintf(current->comm, sizeof(current->comm), "%c%lx", (chState), IPRT_DEBUG_SEMS_ADDRESS(pThis));
#else
# define IPRT_DEBUG_SEMS_STATE(pThis, chState) do { } while (0)
#endif
/**
* Puts semaphore info into the task_struct::comm field if IPRT_DEBUG_SEMS is
* defined.
*/
#ifdef IPRT_DEBUG_SEMS
# define IPRT_DEBUG_SEMS_STATE_RC(pThis, chState, rc) \
snprintf(current->comm, sizeof(current->comm), "%c%lx:%d", (chState), IPRT_DEBUG_SEMS_ADDRESS(pThis), rc);
#else
# define IPRT_DEBUG_SEMS_STATE_RC(pThis, chState, rc) do { } while (0)
#endif
/** @name Macros for preserving EFLAGS.AC on 3.19+/amd64 paranoid.
* The AMD 64 switch_to in macro in arch/x86/include/asm/switch_to.h stopped
* restoring flags.
* @{ */
#if defined(CONFIG_X86_SMAP) || defined(RT_STRICT) || defined(IPRT_WITH_EFLAGS_AC_PRESERVING)
# include <iprt/asm-amd64-x86.h>
# define IPRT_X86_EFL_AC RT_BIT(18)
# define IPRT_LINUX_SAVE_EFL_AC() RTCCUINTREG fSavedEfl = ASMGetFlags()
# define IPRT_LINUX_RESTORE_EFL_AC() ASMSetFlags(fSavedEfl)
# define IPRT_LINUX_RESTORE_EFL_ONLY_AC() ASMChangeFlags(~IPRT_X86_EFL_AC, fSavedEfl & IPRT_X86_EFL_AC)
#else
# define IPRT_LINUX_SAVE_EFL_AC() do { } while (0)
# define IPRT_LINUX_RESTORE_EFL_AC() do { } while (0)
# define IPRT_LINUX_RESTORE_EFL_ONLY_AC() do { } while (0)
#endif
/** @} */
/*
* There are some conflicting defines in iprt/param.h, sort them out here.
*/
#ifndef ___iprt_param_h
# undef PAGE_SIZE
# undef PAGE_OFFSET_MASK
# include <iprt/param.h>
#endif
/*
* Some global indicator macros.
*/
/** @def IPRT_LINUX_HAS_HRTIMER
* Whether the kernel support high resolution timers (Linux kernel versions
* 2.6.28 and later (hrtimer_add_expires_ns() & schedule_hrtimeout). */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)
# define IPRT_LINUX_HAS_HRTIMER
#endif
/*
* Workqueue stuff, see initterm-r0drv-linux.c.
*/
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 41)
typedef struct work_struct RTR0LNXWORKQUEUEITEM;
#else
typedef struct tq_struct RTR0LNXWORKQUEUEITEM;
#endif
DECLHIDDEN(void) rtR0LnxWorkqueuePush(RTR0LNXWORKQUEUEITEM *pWork, void (*pfnWorker)(RTR0LNXWORKQUEUEITEM *));
DECLHIDDEN(void) rtR0LnxWorkqueueFlush(void);
#endif
......@@ -372,11 +372,17 @@ static int rtR0MemObjLinuxAllocPages(PRTR0MEMOBJLNX *ppMemLnx, RTR0MEMOBJTYPE en
#endif /* < 2.4.22 */
pMemLnx->fContiguous = fContiguous;
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
/*
* Reserve the pages.
*
* Linux >= 4.5 with CONFIG_DEBUG_VM panics when setting PG_reserved on compound
* pages. According to Michal Hocko this shouldn't be necessary anyway because
* pages which are not on the LRU list are never evictable.
*/
for (iPage = 0; iPage < cPages; iPage++)
SetPageReserved(pMemLnx->apPages[iPage]);
#endif
/*
* Note that the physical address of memory allocated with alloc_pages(flags, order)
......@@ -423,7 +429,12 @@ static void rtR0MemObjLinuxFreePages(PRTR0MEMOBJLNX pMemLnx)
*/
while (iPage-- > 0)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 5, 0)
/*
* See SetPageReserved() in rtR0MemObjLinuxAllocPages()
*/
ClearPageReserved(pMemLnx->apPages[iPage]);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 22)
#else
MY_SET_PAGES_NOEXEC(pMemLnx->apPages[iPage], 1);
......@@ -578,7 +589,11 @@ DECLHIDDEN(int) rtR0MemObjNativeFree(RTR0MEMOBJ pMem)
{
if (!PageReserved(pMemLnx->apPages[iPage]))
SetPageDirty(pMemLnx->apPages[iPage]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
put_page(pMemLnx->apPages[iPage]);
#else
page_cache_release(pMemLnx->apPages[iPage]);
#endif
}
if (pTask && pTask->mm)
......@@ -1029,14 +1044,38 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
/*
* Get user pages.
*/
rc = get_user_pages(pTask, /* Task for fault accounting. */
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
fWrite, /* Write to memory. */
fWrite, /* force write access. */
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
if (R0Process == RTR0ProcHandleSelf())
rc = get_user_pages(R3Ptr, /* Where from. */
cPages, /* How many pages. */
fWrite, /* Write to memory. */
fWrite, /* force write access. */
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
/*
* Actually this should not happen at the moment as call this function
* only for our own process.
*/
else
rc = get_user_pages_remote(
pTask, /* Task for fault accounting. */
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
fWrite, /* Write to memory. */
fWrite, /* force write access. */
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
#else /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
rc = get_user_pages(pTask, /* Task for fault accounting. */
pTask->mm, /* Whose pages. */
R3Ptr, /* Where from. */
cPages, /* How many pages. */
fWrite, /* Write to memory. */
fWrite, /* force write access. */
&pMemLnx->apPages[0], /* Page array. */
papVMAs); /* vmas */
#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0) */
if (rc == cPages)
{
/*
......@@ -1081,7 +1120,11 @@ DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3P
{
if (!PageReserved(pMemLnx->apPages[rc]))
SetPageDirty(pMemLnx->apPages[rc]);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
put_page(pMemLnx->apPages[rc]);
#else
page_cache_release(pMemLnx->apPages[rc]);
#endif
}
up_read(&pTask->mm->mmap_sem);
......
#define VBOX_SVN_REV 105871
#define VBOX_SVN_REV 106667
......@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 0
#define VBOX_VERSION_BUILD 16
#define VBOX_VERSION_STRING_RAW "5.0.16"
#define VBOX_VERSION_STRING "5.0.16_Ubuntu"
#define VBOX_VERSION_BUILD 18
#define VBOX_VERSION_STRING_RAW "5.0.18"
#define VBOX_VERSION_STRING "5.0.18_Ubuntu"
#define VBOX_API_VERSION_STRING "5_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"
......
......@@ -712,7 +712,11 @@ int sf_write_end(struct file *file, struct address_space *mapping, loff_t pos,
}
unlock_page(page);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 6, 0)
put_page(page);
#else
page_cache_release(page);
#endif
return nwritten;
}
......
......@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 0
#define VBOX_VERSION_BUILD 16
#define VBOX_VERSION_STRING_RAW "5.0.16"
#define VBOX_VERSION_STRING "5.0.16_Ubuntu"
#define VBOX_VERSION_BUILD 18
#define VBOX_VERSION_STRING_RAW "5.0.18"
#define VBOX_VERSION_STRING "5.0.18_Ubuntu"
#define VBOX_API_VERSION_STRING "5_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"
......
/* $Id: HGSMIBase.cpp $ */
/** @file
* VirtualBox Video driver, common code - HGSMI initialisation and helper
* functions.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include <VBox/VBoxVideoGuest.h>
#include <VBox/VBoxVideo.h>
#include <VBox/VBoxGuest.h>
#include <VBox/Hardware/VBoxVideoVBE.h>
#include <VBox/VMMDev.h>
#include <iprt/asm.h>
#include <iprt/log.h>
#include <iprt/string.h>
/** Acknowledge the host command located at offset @a offBuffer within the
 * host command buffer by writing that offset back to the host port. */
static void HGSMINotifyHostCmdComplete(PHGSMIHOSTCOMMANDCONTEXT pCtx, HGSMIOFFSET offBuffer)
{
    VBoxVideoCmnPortWriteUlong(pCtx->port, offBuffer);
}
/**
 * Inform the host that a command has been handled.
 *
 * @param pCtx  the context containing the heap to be used
 * @param pvMem pointer into the heap as mapped in @a pCtx to the command to
 *              be completed
 */
RTDECL(void) VBoxHGSMIHostCmdComplete(PHGSMIHOSTCOMMANDCONTEXT pCtx, void *pvMem)
{
    /* Translate the payload pointer back to its buffer header, then to an
     * offset the host can interpret. */
    HGSMIBUFFERHEADER *pHeader = HGSMIBufferHeaderFromData(pvMem);
    HGSMIOFFSET offHeader = HGSMIPointerToOffset(&pCtx->areaCtx, pHeader);
    Assert(offHeader != HGSMIOFFSET_VOID);
    if (offHeader != HGSMIOFFSET_VOID)
        HGSMINotifyHostCmdComplete(pCtx, offHeader);
}
/** Submit an incoming host command to the appropriate channel handler.
 * If dispatching fails, no handler will ever complete the command, so it is
 * completed here instead. */
static void hgsmiHostCmdProcess(PHGSMIHOSTCOMMANDCONTEXT pCtx, HGSMIOFFSET offBuffer)
{
    int rc = HGSMIBufferProcess(&pCtx->areaCtx, &pCtx->channels, offBuffer);
    Assert(!RT_FAILURE(rc));
    if (RT_FAILURE(rc))
        HGSMINotifyHostCmdComplete(pCtx, offBuffer);
    /* On success the channel callback owns completion of the command. */
}
/** Read the offset of the next pending command from the host port. */
static HGSMIOFFSET hgsmiGetHostBuffer(PHGSMIHOSTCOMMANDCONTEXT pCtx)
{
    return VBoxVideoCmnPortReadUlong(pCtx->port);
}
/** Fetch the next command from the host and dispatch it; void offsets are
 * asserted on and ignored. */
static void hgsmiHostCommandQueryProcess(PHGSMIHOSTCOMMANDCONTEXT pCtx)
{
    HGSMIOFFSET offCmd = hgsmiGetHostBuffer(pCtx);
    AssertReturnVoid(offCmd != HGSMIOFFSET_VOID);
    hgsmiHostCmdProcess(pCtx, offCmd);
}
/** Drain the host command queue.
 *
 * Loops for as long as the host flags advertise pending commands.  The atomic
 * compare-exchange on fHostCmdProcessing acts as a simple lock: if another
 * caller is already draining the queue (the exchange from false to true
 * fails), we return and leave the remaining work to it.  The flag is cleared
 * after each command so a competing caller can take over between iterations.
 *
 * @param pCtx the host command context (host flags, IO port, heap area).
 */
RTDECL(void) VBoxHGSMIProcessHostQueue(PHGSMIHOSTCOMMANDCONTEXT pCtx)
{
    while (pCtx->pfHostFlags->u32HostFlags & HGSMIHOSTFLAGS_COMMANDS_PENDING)
    {
        if (!ASMAtomicCmpXchgBool(&pCtx->fHostCmdProcessing, true, false))
            return;
        hgsmiHostCommandQueryProcess(pCtx);
        ASMAtomicWriteBool(&pCtx->fHostCmdProcessing, false);
    }
}
/** Detect whether HGSMI is supported by the host.
 * Writes the HGSMI ID to the VBE DISPI interface and checks whether it is
 * echoed back. */
RTDECL(bool) VBoxHGSMIIsSupported(void)
{
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_ID);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, VBE_DISPI_ID_HGSMI);
    return VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA) == VBE_DISPI_ID_HGSMI;
}
/**
 * Allocate and initialise a command descriptor in the guest heap for a
 * guest-to-host command.
 *
 * @returns pointer to the descriptor's command data buffer, NULL on failure
 * @param pCtx   the context containing the heap to be used
 * @param cbData the size of the command data to go into the descriptor
 * @param u8Ch   the HGSMI channel to be used, set to the descriptor
 * @param u16Op  the HGSMI command to be sent, set to the descriptor
 */
RTDECL(void *) VBoxHGSMIBufferAlloc(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                    HGSMISIZE cbData,
                                    uint8_t u8Ch,
                                    uint16_t u16Op)
{
#ifdef VBOX_WDDM_MINIPORT
    return VBoxSHGSMIHeapAlloc(&pCtx->heapCtx, cbData, u8Ch, u16Op);
#else
    return HGSMIHeapAlloc(&pCtx->heapCtx, cbData, u8Ch, u16Op);
#endif
}
/**
 * Free a descriptor allocated by @a VBoxHGSMIBufferAlloc.
 *
 * @param pCtx     the context containing the heap used
 * @param pvBuffer the pointer returned by @a VBoxHGSMIBufferAlloc
 */
RTDECL(void) VBoxHGSMIBufferFree(PHGSMIGUESTCOMMANDCONTEXT pCtx, void *pvBuffer)
{
#ifdef VBOX_WDDM_MINIPORT
    VBoxSHGSMIHeapFree(&pCtx->heapCtx, pvBuffer);
#else
    HGSMIHeapFree(&pCtx->heapCtx, pvBuffer);
#endif
}
/**
 * Submit a command descriptor allocated by @a VBoxHGSMIBufferAlloc.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER if the pointer is not a
 *          valid heap buffer
 * @param pCtx     the context containing the heap used
 * @param pvBuffer the pointer returned by @a VBoxHGSMIBufferAlloc
 */
RTDECL(int) VBoxHGSMIBufferSubmit(PHGSMIGUESTCOMMANDCONTEXT pCtx, void *pvBuffer)
{
    /* Convert the buffer pointer to the offset used for port IO. */
    HGSMIOFFSET offBuffer = HGSMIHeapBufferOffset(HGSMIGUESTCMDHEAP_GET(&pCtx->heapCtx), pvBuffer);
    Assert(offBuffer != HGSMIOFFSET_VOID);
    if (offBuffer == HGSMIOFFSET_VOID)
        return VERR_INVALID_PARAMETER;
    /* Submit the buffer to the host. */
    VBoxVideoCmnPortWriteUlong(pCtx->port, offBuffer);
    /* Make the compiler aware that the host has changed memory. */
    ASMCompilerBarrier();
    return VINF_SUCCESS;
}
/** Inform the host of the location of the host flags in VRAM via an HGSMI
 * command. */
static int vboxHGSMIReportFlagsLocation(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                        HGSMIOFFSET offLocation)
{
    /* Allocate the IO buffer for the command. */
    HGSMIBUFFERLOCATION *p = (HGSMIBUFFERLOCATION *)VBoxHGSMIBufferAlloc(pCtx,
                                                                         sizeof(HGSMIBUFFERLOCATION),
                                                                         HGSMI_CH_HGSMI,
                                                                         HGSMI_CC_HOST_FLAGS_LOCATION);
    if (!p)
        return VERR_NO_MEMORY;
    /* Fill in the payload and submit. */
    p->offLocation = offLocation;
    p->cbLocation  = sizeof(HGSMIHOSTFLAGS);
    int rc = VBoxHGSMIBufferSubmit(pCtx, p);
    VBoxHGSMIBufferFree(pCtx, p);
    return rc;
}
/**
 * Inform the host of the location of the host flags in VRAM via an HGSMI
 * command.
 * @returns IPRT status value.
 * @returns VERR_NOT_IMPLEMENTED if the host does not support the command.
 * @returns VERR_NO_MEMORY if a heap allocation fails.
 * @param pCtx        the context of the guest heap to use.
 * @param offLocation the offset chosen for the flags within guest VRAM.
 */
RTDECL(int) VBoxHGSMIReportFlagsLocation(PHGSMIGUESTCOMMANDCONTEXT pCtx, HGSMIOFFSET offLocation)
{
    return vboxHGSMIReportFlagsLocation(pCtx, offLocation);
}
/** Notify the host of HGSMI-related guest capabilities via an HGSMI
 * command. */
static int vboxHGSMISendCapsInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx, uint32_t fCaps)
{
    /* Allocate the IO buffer for the command. */
    VBVACAPS *pCaps = (VBVACAPS *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVACAPS),
                                                       HGSMI_CH_VBVA, VBVA_INFO_CAPS);
    if (!pCaps)
        return VERR_NO_MEMORY;
    /* The host overwrites rc; if it does not know the command, the sentinel
     * below is what the caller will see. */
    pCaps->rc    = VERR_NOT_IMPLEMENTED;
    pCaps->fCaps = fCaps;
    int rc = VBoxHGSMIBufferSubmit(pCtx, pCaps);
    if (RT_SUCCESS(rc))
    {
        AssertRC(pCaps->rc);
        rc = pCaps->rc;
    }
    VBoxHGSMIBufferFree(pCtx, pCaps);
    return rc;
}
/**
 * Notify the host of HGSMI-related guest capabilities via an HGSMI command.
 * @returns IPRT status value.
 * @returns VERR_NOT_IMPLEMENTED if the host does not support the command.
 * @returns VERR_NO_MEMORY if a heap allocation fails.
 * @param pCtx  the context of the guest heap to use.
 * @param fCaps the capabilities to report, see VBVACAPS.
 */
RTDECL(int) VBoxHGSMISendCapsInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx, uint32_t fCaps)
{
    return vboxHGSMISendCapsInfo(pCtx, fCaps);
}
/** Tell the host about the location of the area of VRAM set aside for the
 * host heap. */
static int vboxHGSMIReportHostArea(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                   uint32_t u32AreaOffset, uint32_t u32AreaSize)
{
    /* Allocate the IO buffer for the command. */
    VBVAINFOHEAP *p = (VBVAINFOHEAP *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAINFOHEAP),
                                                           HGSMI_CH_VBVA, VBVA_INFO_HEAP);
    if (!p)
        return VERR_NO_MEMORY;
    /* Fill in the payload and submit. */
    p->u32HeapOffset = u32AreaOffset;
    p->u32HeapSize   = u32AreaSize;
    int rc = VBoxHGSMIBufferSubmit(pCtx, p);
    VBoxHGSMIBufferFree(pCtx, p);
    return rc;
}
/**
 * Get the information needed to map the basic communication structures in
 * device memory into our address space.  All pointer parameters are optional.
 *
 * The adapter information area sits at the very end of VRAM; the host flags
 * occupy its tail and the guest heap backing memory the rest.
 *
 * @param cbVRAM              how much video RAM is allocated to the device
 * @param poffVRAMBaseMapping where to save the offset from the start of the
 *                            device VRAM of the whole area to map
 * @param pcbMapping          where to save the mapping size
 * @param poffGuestHeapMemory where to save the offset into the mapped area
 *                            of the guest heap backing memory
 * @param pcbGuestHeapMemory  where to save the size of the guest heap
 *                            backing memory
 * @param poffHostFlags       where to save the offset into the mapped area
 *                            of the host flags
 */
RTDECL(void) VBoxHGSMIGetBaseMappingInfo(uint32_t cbVRAM,
                                         uint32_t *poffVRAMBaseMapping,
                                         uint32_t *pcbMapping,
                                         uint32_t *poffGuestHeapMemory,
                                         uint32_t *pcbGuestHeapMemory,
                                         uint32_t *poffHostFlags)
{
    AssertPtrNullReturnVoid(poffVRAMBaseMapping);
    AssertPtrNullReturnVoid(pcbMapping);
    AssertPtrNullReturnVoid(poffGuestHeapMemory);
    AssertPtrNullReturnVoid(pcbGuestHeapMemory);
    AssertPtrNullReturnVoid(poffHostFlags);
    if (poffVRAMBaseMapping)
        *poffVRAMBaseMapping = cbVRAM - VBVA_ADAPTER_INFORMATION_SIZE;
    if (pcbMapping)
        *pcbMapping = VBVA_ADAPTER_INFORMATION_SIZE;
    if (poffGuestHeapMemory)
        *poffGuestHeapMemory = 0;
    if (pcbGuestHeapMemory)
        *pcbGuestHeapMemory = VBVA_ADAPTER_INFORMATION_SIZE - sizeof(HGSMIHOSTFLAGS);
    if (poffHostFlags)
        *poffHostFlags = VBVA_ADAPTER_INFORMATION_SIZE - sizeof(HGSMIHOSTFLAGS);
}
/**
 * Set up the HGSMI guest-to-host command context.
 * @returns iprt status value
 * @param pCtx                   the context to set up
 * @param pvGuestHeapMemory      a pointer to the mapped backing memory for
 *                               the guest heap
 * @param cbGuestHeapMemory      the size of the backing memory area
 * @param offVRAMGuestHeapMemory the offset of the memory pointed to by
 *                               @a pvGuestHeapMemory within the video RAM
 * @param pEnv                   HGSMI environment (alloc/free callbacks)
 */
RTDECL(int) VBoxHGSMISetupGuestContext(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                       void *pvGuestHeapMemory,
                                       uint32_t cbGuestHeapMemory,
                                       uint32_t offVRAMGuestHeapMemory,
                                       const HGSMIENV *pEnv)
{
    /** @todo should we be using a fixed ISA port value here? */
    pCtx->port = (RTIOPORT)VGA_PORT_HGSMI_GUEST;
#ifdef VBOX_WDDM_MINIPORT
    return VBoxSHGSMIInit(&pCtx->heapCtx, pvGuestHeapMemory, cbGuestHeapMemory,
                          offVRAMGuestHeapMemory, pEnv);
#else
    return HGSMIHeapSetup(&pCtx->heapCtx, pvGuestHeapMemory, cbGuestHeapMemory,
                          offVRAMGuestHeapMemory, pEnv);
#endif
}
/**
 * Get the information needed to map the area used by the host to send back
 * requests.
 *
 * @param pCtx              the context containing the heap to use
 * @param cbVRAM            how much video RAM is allocated to the device
 * @param offVRAMBaseMapping the offset of the basic communication structures
 *                          into the guest's VRAM
 * @param poffVRAMHostArea  where to store the offset into VRAM of the host
 *                          heap area
 * @param pcbHostArea       where to store the size of the host heap area
 */
RTDECL(void) VBoxHGSMIGetHostAreaMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                         uint32_t cbVRAM,
                                         uint32_t offVRAMBaseMapping,
                                         uint32_t *poffVRAMHostArea,
                                         uint32_t *pcbHostArea)
{
    uint32_t offVRAMHostArea = offVRAMBaseMapping, cbHostArea = 0;
    AssertPtrReturnVoid(poffVRAMHostArea);
    AssertPtrReturnVoid(pcbHostArea);
    /* Ask the host how big a heap area it wants. */
    VBoxQueryConfHGSMI(pCtx, VBOX_VBVA_CONF32_HOST_HEAP_SIZE, &cbHostArea);
    if (cbHostArea != 0)
    {
        /* Cap the host heap at a quarter of VRAM (less the adapter
         * information area). */
        uint32_t cbHostAreaMaxSize = cbVRAM / 4;
        /** @todo what is the idea of this? */
        if (cbHostAreaMaxSize >= VBVA_ADAPTER_INFORMATION_SIZE)
        {
            cbHostAreaMaxSize -= VBVA_ADAPTER_INFORMATION_SIZE;
        }
        if (cbHostArea > cbHostAreaMaxSize)
        {
            cbHostArea = cbHostAreaMaxSize;
        }
        /* Round up to 4096 bytes. */
        cbHostArea = (cbHostArea + 0xFFF) & ~0xFFF;
        /* The host area is placed immediately below the base mapping. */
        offVRAMHostArea = offVRAMBaseMapping - cbHostArea;
    }
    *pcbHostArea = cbHostArea;
    *poffVRAMHostArea = offVRAMHostArea;
    LogFunc(("offVRAMHostArea = 0x%08X, cbHostArea = 0x%08X\n",
             offVRAMHostArea, cbHostArea));
}
/**
 * Initialise the host context structure.
 *
 * @param pCtx              the context structure to initialise
 * @param pvBaseMapping     where the basic HGSMI structures are mapped at
 * @param offHostFlags      the offset of the host flags into the basic HGSMI
 *                          structures
 * @param pvHostAreaMapping where the area for the host heap is mapped at
 * @param offVRAMHostArea   offset of the host heap area into VRAM
 * @param cbHostArea        size in bytes of the host heap area
 */
RTDECL(void) VBoxHGSMISetupHostContext(PHGSMIHOSTCOMMANDCONTEXT pCtx,
                                       void *pvBaseMapping,
                                       uint32_t offHostFlags,
                                       void *pvHostAreaMapping,
                                       uint32_t offVRAMHostArea,
                                       uint32_t cbHostArea)
{
    pCtx->pfHostFlags = (HGSMIHOSTFLAGS *)((uint8_t *)pvBaseMapping + offHostFlags);
    /** @todo should we really be using a fixed ISA port value here? */
    pCtx->port = (RTIOPORT)VGA_PORT_HGSMI_HOST;
    HGSMIAreaInitialize(&pCtx->areaCtx, pvHostAreaMapping, cbHostArea,
                        offVRAMHostArea);
}
/**
 * Tell the host about the ways it can use to communicate back to us via an
 * HGSMI command
 *
 * @returns iprt status value
 * @param pCtx                 the context containing the heap to use
 * @param offVRAMFlagsLocation where we wish the host to place its flags
 *                             relative to the start of the VRAM
 * @param fCaps                additions HGSMI capabilities the guest supports
 * @param offVRAMHostArea      offset into VRAM of the host heap area
 * @param cbHostArea           size in bytes of the host heap area
 */
RTDECL(int) VBoxHGSMISendHostCtxInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                     HGSMIOFFSET offVRAMFlagsLocation,
                                     uint32_t fCaps,
                                     uint32_t offVRAMHostArea,
                                     uint32_t cbHostArea)
{
    Log(("VBoxVideo::vboxSetupAdapterInfo\n"));
    /* Report the flags location first so they are initialised by the time
     * the host heap becomes ready. */
    int rc = vboxHGSMIReportFlagsLocation(pCtx, offVRAMFlagsLocation);
    AssertRC(rc);
    if (RT_SUCCESS(rc) && fCaps)
    {
        /* Tell the host about our capabilities. */
        rc = vboxHGSMISendCapsInfo(pCtx, fCaps);
        AssertRC(rc);
    }
    if (RT_SUCCESS(rc))
    {
        /* Finally report the host heap location. */
        rc = vboxHGSMIReportHostArea(pCtx, offVRAMHostArea, cbHostArea);
        AssertRC(rc);
    }
    Log(("VBoxVideo::vboxSetupAdapterInfo finished rc = %d\n", rc));
    return rc;
}
/** Sanity test of the query-configuration path, run once on first call.
 * We do not worry about concurrency issues. */
static int testQueryConf(PHGSMIGUESTCOMMANDCONTEXT pCtx)
{
    static bool s_fChecked = false;
    if (s_fChecked)
        return VINF_SUCCESS;
    s_fChecked = true;
    /* Query a known-invalid index; the host should echo the value back. */
    uint32_t u32Value = 0;
    int rc = VBoxQueryConfHGSMI(pCtx, UINT32_MAX, &u32Value);
    if (RT_SUCCESS(rc) && u32Value == UINT32_MAX)
        return VINF_SUCCESS;
    /* Failed: allow the test to be retried on the next call. */
    s_fChecked = false;
    return RT_FAILURE(rc) ? rc : VERR_INTERNAL_ERROR;
}
/**
 * Query the host for an HGSMI configuration parameter via an HGSMI command.
 * @returns iprt status value
 * @param pCtx        the context containing the heap used
 * @param u32Index    the index of the parameter to query,
 *                    @see VBVACONF32::u32Index
 * @param u32DefValue default value returned if the host does not answer
 * @param pulValue    where to store the value of the parameter on success
 */
RTDECL(int) VBoxQueryConfHGSMIDef(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                  uint32_t u32Index, uint32_t u32DefValue, uint32_t *pulValue)
{
    LogFunc(("u32Index = %d\n", u32Index));
    /* One-off sanity check of the query mechanism. */
    int rc = testQueryConf(pCtx);
    if (RT_FAILURE(rc))
        return rc;
    /* Allocate the IO buffer. */
    VBVACONF32 *p = (VBVACONF32 *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVACONF32),
                                                       HGSMI_CH_VBVA, VBVA_QUERY_CONF32);
    if (p)
    {
        /* Seed the value with the default; the host overwrites it. */
        p->u32Index = u32Index;
        p->u32Value = u32DefValue;
        rc = VBoxHGSMIBufferSubmit(pCtx, p);
        if (RT_SUCCESS(rc))
        {
            *pulValue = p->u32Value;
            LogFunc(("u32Value = %d\n", p->u32Value));
        }
        VBoxHGSMIBufferFree(pCtx, p);
    }
    else
        rc = VERR_NO_MEMORY;
    LogFunc(("rc = %d\n", rc));
    return rc;
}
/** Query the host for an HGSMI configuration parameter, using UINT32_MAX as
 * the default value. */
RTDECL(int) VBoxQueryConfHGSMI(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                               uint32_t u32Index, uint32_t *pulValue)
{
    return VBoxQueryConfHGSMIDef(pCtx, u32Index, UINT32_MAX, pulValue);
}
/**
 * Pass the host a new mouse pointer shape via an HGSMI command.
 *
 * @returns success or failure
 * @param pCtx     the context containing the heap to be used
 * @param fFlags   cursor flags, @see VMMDevReqMousePointer::fFlags
 * @param cHotX    horizontal position of the hot spot
 * @param cHotY    vertical position of the hot spot
 * @param cWidth   width in pixels of the cursor
 * @param cHeight  height in pixels of the cursor
 * @param pPixels  pixel data, @see VMMDevReqMousePointer for the format
 * @param cbLength size in bytes of the pixel data
 */
RTDECL(int) VBoxHGSMIUpdatePointerShape(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                        uint32_t fFlags,
                                        uint32_t cHotX,
                                        uint32_t cHotY,
                                        uint32_t cWidth,
                                        uint32_t cHeight,
                                        uint8_t *pPixels,
                                        uint32_t cbLength)
{
    VBVAMOUSEPOINTERSHAPE *p;
    uint32_t cbData = 0;
    int rc = VINF_SUCCESS;
    if (fFlags & VBOX_MOUSE_POINTER_SHAPE)
    {
        /* Size of the pointer data: sizeof (AND mask) + sizeof (XOR_MASK).
         * The AND mask is 1bpp, each scanline rounded up to a 4-byte
         * boundary; the XOR mask is 4 bytes per pixel. */
        cbData = ((((cWidth + 7) / 8) * cHeight + 3) & ~3)
                 + cWidth * 4 * cHeight;
        /* If shape is supplied, then always create the pointer visible.
         * See comments in 'vboxUpdatePointerShape'
         */
        fFlags |= VBOX_MOUSE_POINTER_VISIBLE;
    }
    LogFlowFunc(("cbData %d, %dx%d\n", cbData, cWidth, cHeight));
    /* Reject shapes whose computed size exceeds the supplied buffer. */
    if (cbData > cbLength)
    {
        LogFunc(("calculated pointer data size is too big (%d bytes, limit %d)\n",
                 cbData, cbLength));
        return VERR_INVALID_PARAMETER;
    }
    /* Allocate the IO buffer. */
    p = (VBVAMOUSEPOINTERSHAPE *)VBoxHGSMIBufferAlloc(pCtx,
                                                      sizeof(VBVAMOUSEPOINTERSHAPE)
                                                      + cbData,
                                                      HGSMI_CH_VBVA,
                                                      VBVA_MOUSE_POINTER_SHAPE);
    if (p)
    {
        /* Prepare data to be sent to the host. */
        /* Will be updated by the host. */
        p->i32Result = VINF_SUCCESS;
        /* We have our custom flags in the field */
        p->fu32Flags = fFlags;
        p->u32HotX = cHotX;
        p->u32HotY = cHotY;
        p->u32Width = cWidth;
        p->u32Height = cHeight;
        if (p->fu32Flags & VBOX_MOUSE_POINTER_SHAPE)
            /* Copy the actual pointer data. */
            memcpy (p->au8Data, pPixels, cbData);
        rc = VBoxHGSMIBufferSubmit(pCtx, p);
        if (RT_SUCCESS(rc))
            rc = p->i32Result;
        /* Free the IO buffer. */
        VBoxHGSMIBufferFree(pCtx, p);
    }
    else
        rc = VERR_NO_MEMORY;
    LogFlowFunc(("rc %d\n", rc));
    return rc;
}
/**
 * Report the guest cursor position.  The host may wish to use this
 * information to re-position its own cursor (though this is currently
 * unlikely).  The current host cursor position is returned.
 * @param pCtx            The context containing the heap used.
 * @param fReportPosition Are we reporting a position?
 * @param x               Guest cursor X position.
 * @param y               Guest cursor Y position.
 * @param pxHost          Host cursor X position is stored here.  Optional.
 * @param pyHost          Host cursor Y position is stored here.  Optional.
 * @returns iprt status code.
 * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
 */
RTDECL(int) VBoxHGSMICursorPosition(PHGSMIGUESTCOMMANDCONTEXT pCtx, bool fReportPosition, uint32_t x, uint32_t y,
                                    uint32_t *pxHost, uint32_t *pyHost)
{
    Log(("%s: x=%u, y=%u\n", __PRETTY_FUNCTION__, (unsigned)x, (unsigned)y));
    /* Allocate the IO buffer for the command. */
    VBVACURSORPOSITION *p = (VBVACURSORPOSITION *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVACURSORPOSITION),
                                                                       HGSMI_CH_VBVA, VBVA_CURSOR_POSITION);
    int rc;
    if (p)
    {
        /* Fill in the guest position, submit, and read back the host one. */
        p->fReportPosition = fReportPosition ? 1 : 0;
        p->x = x;
        p->y = y;
        rc = VBoxHGSMIBufferSubmit(pCtx, p);
        if (RT_SUCCESS(rc))
        {
            if (pxHost)
                *pxHost = p->x;
            if (pyHost)
                *pyHost = p->y;
            Log(("%s: return: x=%u, y=%u\n", __PRETTY_FUNCTION__, (unsigned)p->x, (unsigned)p->y));
        }
        VBoxHGSMIBufferFree(pCtx, p);
    }
    else
        rc = VERR_NO_MEMORY;
    LogFunc(("rc = %d\n", rc));
    return rc;
}
/** @todo The mouse pointer position should be read from VMMDev memory; the address of
 * that memory region can be queried from VMMDev via an IOCTL. This VMMDev memory
 * region will contain host information which is needed by the guest.
 *
 * Reading will not cause a switch to the host.
 *
 * Things to take into account:
 * * synchronization: the host must write to the memory only from EMT;
 * large structures must be read under a flag which tells the host
 * that the guest is currently reading the memory (OWNER flag?).
 * * guest writes: maybe allocate a page for the host info and make
 * the page read-only for the guest.
 * * the information should be available only to additions drivers.
 * * the VMMDev additions driver will inform the host which version of the info it
 * expects; the host must support all versions.
 *
 */
/* $Id: HGSMICommon.cpp $ */
/** @file
* VBox Host Guest Shared Memory Interface (HGSMI) - Functions common to both host and guest.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#define LOG_DISABLED /* Maybe we can enabled it all the time now? */
#define LOG_GROUP LOG_GROUP_HGSMI
#include <iprt/heap.h>
#include <iprt/string.h>
#include <VBox/HGSMI/HGSMI.h>
#include <VBox/log.h>
/* Channel flags. */
#define HGSMI_CH_F_REGISTERED 0x01
/* Assertions for situations which could happen and normally must be processed properly
* but must be investigated during development: guest misbehaving, etc.
*/
#ifdef HGSMI_STRICT
#define HGSMI_STRICT_ASSERT_FAILED() AssertFailed()
#define HGSMI_STRICT_ASSERT(expr) Assert(expr)
#else
#define HGSMI_STRICT_ASSERT_FAILED() do {} while (0)
#define HGSMI_STRICT_ASSERT(expr) do {} while (0)
#endif /* !HGSMI_STRICT */
/* One-at-a-Time Hash from
* http://www.burtleburtle.net/bob/hash/doobs.html
*
* ub4 one_at_a_time(char *key, ub4 len)
* {
* ub4 hash, i;
* for (hash=0, i=0; i<len; ++i)
* {
* hash += key[i];
* hash += (hash << 10);
* hash ^= (hash >> 6);
* }
* hash += (hash << 3);
* hash ^= (hash >> 11);
* hash += (hash << 15);
* return hash;
* }
*/
/** Return the initial (seed) value of the one-at-a-time hash. */
static uint32_t hgsmiHashBegin(void)
{
    return 0;
}
/** Fold @a cbData bytes at @a pvData into the running one-at-a-time hash
 * value @a hash and return the updated value. */
static uint32_t hgsmiHashProcess(uint32_t hash,
                                 const void *pvData,
                                 size_t cbData)
{
    const uint8_t *pu8 = (const uint8_t *)pvData;
    size_t i;
    for (i = 0; i < cbData; ++i)
    {
        hash += pu8[i];
        hash += hash << 10;
        hash ^= hash >> 6;
    }
    return hash;
}
/** Apply the final avalanche steps of the one-at-a-time hash. */
static uint32_t hgsmiHashEnd(uint32_t hash)
{
    hash += hash << 3;
    hash ^= hash >> 11;
    hash += hash << 15;
    return hash;
}
/** Compute the checksum of an HGSMI buffer: the buffer offset, the complete
 * header and the tail up to (excluding) its u32Checksum field are hashed. */
uint32_t HGSMIChecksum(HGSMIOFFSET offBuffer,
                       const HGSMIBUFFERHEADER *pHeader,
                       const HGSMIBUFFERTAIL *pTail)
{
    uint32_t hash = hgsmiHashBegin();
    hash = hgsmiHashProcess(hash, &offBuffer, sizeof(offBuffer));
    hash = hgsmiHashProcess(hash, pHeader, sizeof(HGSMIBUFFERHEADER));
    hash = hgsmiHashProcess(hash, pTail, RT_OFFSETOF(HGSMIBUFFERTAIL, u32Checksum));
    return hgsmiHashEnd(hash);
}
/** Initialise an HGSMI area descriptor.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER if the area description
 *          is invalid.
 * @param pArea   the descriptor to fill in.
 * @param pvBase  address of the start of the area in our mapping.
 * @param cbArea  size of the area in bytes.
 * @param offBase offset of the area within the HGSMI offset space.
 */
int HGSMIAreaInitialize(HGSMIAREA *pArea,
                        void *pvBase,
                        HGSMISIZE cbArea,
                        HGSMIOFFSET offBase)
{
    uint8_t *pu8Base = (uint8_t *)pvBase;
    if (   !pArea                                   /* Check that the area: */
        || cbArea < HGSMIBufferMinimumSize()        /* large enough; */
        || pu8Base + cbArea < pu8Base               /* no address space wrap; */
        || offBase > UINT32_C(0xFFFFFFFF) - cbArea  /* area within the 32 bit space: offBase + cbMem <= 0xFFFFFFFF. */
       )
    {
        return VERR_INVALID_PARAMETER;
    }
    pArea->pu8Base = pu8Base;
    pArea->offBase = offBase;
    /* offLast is the highest offset at which a minimum-size buffer still
     * fits inside the area. */
    pArea->offLast = cbArea - HGSMIBufferMinimumSize() + offBase;
    pArea->cbArea = cbArea;
    return VINF_SUCCESS;
}
/** Reset an HGSMI area descriptor to all-zeroes; NULL is tolerated. */
void HGSMIAreaClear(HGSMIAREA *pArea)
{
    if (!pArea)
        return;
    RT_ZERO(*pArea);
}
/* Initialize the memory buffer including its checksum.
 * No changes allowed to the header and the tail after that.
 */
HGSMIOFFSET HGSMIBufferInitializeSingle(const HGSMIAREA *pArea,
                                        HGSMIBUFFERHEADER *pHeader,
                                        HGSMISIZE cbBuffer,
                                        uint8_t u8Channel,
                                        uint16_t u16ChannelInfo)
{
    if (   !pArea
        || !pHeader
        || cbBuffer < HGSMIBufferMinimumSize())
    {
        return HGSMIOFFSET_VOID;
    }
    /* Buffer must be within the area:
     * * header data size do not exceed the maximum data size;
     * * buffer address is greater than the area base address;
     * * buffer address is lower than the maximum allowed for the given data size.
     */
    HGSMISIZE cbMaximumDataSize = pArea->offLast - pArea->offBase;
    uint32_t u32DataSize = cbBuffer - HGSMIBufferMinimumSize();
    if (   u32DataSize > cbMaximumDataSize
        || (uint8_t *)pHeader < pArea->pu8Base
        || (uint8_t *)pHeader > pArea->pu8Base + cbMaximumDataSize - u32DataSize)
    {
        return HGSMIOFFSET_VOID;
    }
    HGSMIOFFSET offBuffer = HGSMIPointerToOffset(pArea, pHeader);
    /* Fill in the header: a single (non-sequenced) buffer. */
    pHeader->u8Flags = HGSMI_BUFFER_HEADER_F_SEQ_SINGLE;
    pHeader->u32DataSize = u32DataSize;
    pHeader->u8Channel = u8Channel;
    pHeader->u16ChannelInfo = u16ChannelInfo;
    RT_ZERO(pHeader->u.au8Union);
    /* The tail follows the payload and carries the checksum over the offset,
     * the header and the tail itself up to the checksum field. */
    HGSMIBUFFERTAIL *pTail = HGSMIBufferTailFromPtr(pHeader, u32DataSize);
    pTail->u32Reserved = 0;
    pTail->u32Checksum = HGSMIChecksum(offBuffer, pHeader, pTail);
    return offBuffer;
}
/** Set up an HGSMI heap over the given memory area: initialise the area
 * descriptor and then the memory allocator on top of it. */
int HGSMIHeapSetup(HGSMIHEAP *pHeap,
                   void *pvBase,
                   HGSMISIZE cbArea,
                   HGSMIOFFSET offBase,
                   const HGSMIENV *pEnv)
{
    AssertPtrReturn(pHeap, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvBase, VERR_INVALID_PARAMETER);
    int rc = HGSMIAreaInitialize(&pHeap->area, pvBase, cbArea, offBase);
    if (RT_FAILURE(rc))
        return rc;
    rc = HGSMIMAInit(&pHeap->ma, &pHeap->area, NULL, 0, 0, pEnv);
    if (RT_FAILURE(rc))
        HGSMIAreaClear(&pHeap->area);   /* Roll back the area on failure. */
    return rc;
}
/** Tear down an HGSMI heap and zero the structure; NULL is tolerated. */
void HGSMIHeapDestroy(HGSMIHEAP *pHeap)
{
    if (!pHeap)
        return;
    HGSMIMAUninit(&pHeap->ma);
    RT_ZERO(*pHeap);
}
/** Allocate an HGSMI buffer of @a cbData payload bytes from the heap and
 * initialise its header/tail for the given channel.
 * @returns pointer to the payload, or NULL on allocation/initialisation
 *          failure. */
void *HGSMIHeapAlloc(HGSMIHEAP *pHeap,
                     HGSMISIZE cbData,
                     uint8_t u8Channel,
                     uint16_t u16ChannelInfo)
{
    const HGSMISIZE cbRequired = HGSMIBufferRequiredSize(cbData);
    HGSMIBUFFERHEADER *pHdr = (HGSMIBUFFERHEADER *)HGSMIHeapBufferAlloc(pHeap, cbRequired);
    if (!pHdr)
        return NULL;
    if (HGSMIBufferInitializeSingle(HGSMIHeapArea(pHeap), pHdr, cbRequired,
                                    u8Channel, u16ChannelInfo) == HGSMIOFFSET_VOID)
    {
        /* Initialisation failed; release the raw buffer again. */
        HGSMIHeapBufferFree(pHeap, pHdr);
        return NULL;
    }
    return HGSMIBufferDataFromPtr(pHdr);
}
/** Free a buffer returned by HGSMIHeapAlloc; @a pvData is the payload
 * pointer.  NULL is tolerated. */
void HGSMIHeapFree(HGSMIHEAP *pHeap,
                   void *pvData)
{
    if (!pvData)
        return;
    HGSMIHeapBufferFree(pHeap, HGSMIBufferHeaderFromData(pvData));
}
/** Allocate @a cbBuffer raw bytes from the heap's memory allocator. */
void *HGSMIHeapBufferAlloc(HGSMIHEAP *pHeap,
                           HGSMISIZE cbBuffer)
{
    return HGSMIMAAlloc(&pHeap->ma, cbBuffer);
}
/** Return a raw buffer obtained from HGSMIHeapBufferAlloc to the heap's
 * memory allocator. */
void HGSMIHeapBufferFree(HGSMIHEAP *pHeap,
                         void *pvBuf)
{
    HGSMIMAFree(&pHeap->ma, pvBuf);
}
/** Describes a single verified HGSMI buffer within an area. */
typedef struct HGSMIBUFFERCONTEXT
{
    const HGSMIBUFFERHEADER *pHeader; /* The original buffer header. */
    void *pvData;                     /* Payload data in the buffer. */
    uint32_t cbData;                  /* Size of data. */
} HGSMIBUFFERCONTEXT;
/** Verify that the given offBuffer points to a valid buffer, which is within the area.
 *
 * @returns VBox status and the buffer information in pBufferContext.
 * @param pArea          Area which supposed to contain the buffer.
 * @param offBuffer      The buffer location in the area.
 * @param pBufferContext Where to write information about the buffer.
 */
static int hgsmiVerifyBuffer(const HGSMIAREA *pArea,
                             HGSMIOFFSET offBuffer,
                             HGSMIBUFFERCONTEXT *pBufferContext)
{
    LogFlowFunc(("buffer 0x%x, area %p %x [0x%x;0x%x]\n",
                 offBuffer, pArea->pu8Base, pArea->cbArea, pArea->offBase, pArea->offLast));
    int rc = VINF_SUCCESS;
    if (   offBuffer < pArea->offBase
        || offBuffer > pArea->offLast)
    {
        /* The offset does not even lie inside the area. */
        LogFunc(("offset 0x%x is outside the area [0x%x;0x%x]!!!\n",
                 offBuffer, pArea->offBase, pArea->offLast));
        rc = VERR_INVALID_PARAMETER;
        HGSMI_STRICT_ASSERT_FAILED();
    }
    else
    {
        void *pvBuffer = HGSMIOffsetToPointer(pArea, offBuffer);
        /* The header is copied to a local snapshot — presumably so the
         * checks below see stable values even if the shared memory is
         * modified concurrently (TODO confirm). */
        HGSMIBUFFERHEADER header = *HGSMIBufferHeaderFromPtr(pvBuffer);
        /* Quick check of the data size, it should be less than the maximum
         * data size for the buffer at this offset.
         */
        LogFlowFunc(("datasize check: header.u32DataSize = 0x%x pArea->offLast - offBuffer = 0x%x\n",
                     header.u32DataSize, pArea->offLast - offBuffer));
        if (header.u32DataSize <= pArea->offLast - offBuffer)
        {
            /* Snapshot the tail as well before checksumming. */
            HGSMIBUFFERTAIL tail = *HGSMIBufferTailFromPtr(pvBuffer, header.u32DataSize);
            /* At least both header and tail structures are in the area. Check the checksum. */
            uint32_t u32Checksum = HGSMIChecksum(offBuffer, &header, &tail);
            LogFlowFunc(("checksum check: u32Checksum = 0x%x pTail->u32Checksum = 0x%x\n",
                         u32Checksum, tail.u32Checksum));
            if (u32Checksum == tail.u32Checksum)
            {
                /* Success. */
                pBufferContext->pHeader = HGSMIBufferHeaderFromPtr(pvBuffer);
                pBufferContext->pvData = HGSMIBufferDataFromPtr(pvBuffer);
                pBufferContext->cbData = header.u32DataSize;
            }
            else
            {
                LogFunc(("invalid checksum 0x%x, expected 0x%x!!!\n",
                         u32Checksum, tail.u32Checksum));
                rc = VERR_INVALID_STATE;
                HGSMI_STRICT_ASSERT_FAILED();
            }
        }
        else
        {
            LogFunc(("invalid data size 0x%x, maximum is 0x%x!!!\n",
                     header.u32DataSize, pArea->offLast - offBuffer));
            rc = VERR_TOO_MUCH_DATA;
            HGSMI_STRICT_ASSERT_FAILED();
        }
    }
    return rc;
}
/** Helper to convert an HGSMI channel index to the channel structure pointer.
 *
 * @returns Pointer to the channel data, or NULL if the channel is not
 *          registered.
 * @param pChannelInfo The channel pool.
 * @param u8Channel    The channel index.
 */
HGSMICHANNEL *HGSMIChannelFindById(HGSMICHANNELINFO *pChannelInfo,
                                   uint8_t u8Channel)
{
    AssertCompile(RT_ELEMENTS(pChannelInfo->Channels) >= 0x100);
    HGSMICHANNEL *pEntry = &pChannelInfo->Channels[u8Channel];
    return (pEntry->u8Flags & HGSMI_CH_F_REGISTERED) ? pEntry : NULL;
}
/** Process a guest buffer.
 *
 * Verifies the buffer at @a offBuffer and, if valid, dispatches it to the
 * handler registered for its channel.
 *
 * @returns VBox status code.
 * @param pArea        Area which supposed to contain the buffer.
 * @param pChannelInfo The channel pool.
 * @param offBuffer    The buffer location in the area.
 */
int HGSMIBufferProcess(const HGSMIAREA *pArea,
                       HGSMICHANNELINFO *pChannelInfo,
                       HGSMIOFFSET offBuffer)
{
    LogFlowFunc(("pArea %p, offBuffer 0x%x\n", pArea, offBuffer));
    AssertPtrReturn(pArea, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pChannelInfo, VERR_INVALID_PARAMETER);
    /* Guest has prepared a command description at 'offBuffer'. */
    HGSMIBUFFERCONTEXT bufferContext;
    int rc = hgsmiVerifyBuffer(pArea, offBuffer, &bufferContext);
    if (RT_SUCCESS(rc))
    {
        /* Pass the command to the appropriate handler registered with this instance.
         * Start with the handler list head, which is the preallocated HGSMI setup channel.
         */
        const HGSMICHANNEL *pChannel = HGSMIChannelFindById(pChannelInfo, bufferContext.pHeader->u8Channel);
        if (pChannel)
        {
            const HGSMICHANNELHANDLER *pHandler = &pChannel->handler;
            if (pHandler->pfnHandler)
            {
                pHandler->pfnHandler(pHandler->pvHandler, bufferContext.pHeader->u16ChannelInfo,
                                     bufferContext.pvData, bufferContext.cbData);
            }
            /* In strict builds, re-verify that the buffer is still intact
             * after the handler ran. */
            HGSMI_STRICT_ASSERT(RT_SUCCESS(hgsmiVerifyBuffer(pArea, offBuffer, &bufferContext)));
        }
        else
        {
            /* No handler registered for this channel. */
            rc = VERR_INVALID_FUNCTION;
            HGSMI_STRICT_ASSERT_FAILED();
        }
    }
    return rc;
}
/** Register a new HGSMI channel by index.
 *
 * @returns VBox status code.
 * @param pChannelInfo      The channel pool managed by the caller.
 * @param u8Channel         Index of the channel.
 * @param pszName           Name of the channel (optional, allocated by the caller).
 * @param pfnChannelHandler The channel callback.
 * @param pvChannelHandler  The callback pointer.
 */
int HGSMIChannelRegister(HGSMICHANNELINFO *pChannelInfo,
                         uint8_t u8Channel,
                         const char *pszName,
                         PFNHGSMICHANNELHANDLER pfnChannelHandler,
                         void *pvChannelHandler)
{
    /* Refuse to register the same channel index twice. */
    if (HGSMIChannelFindById(pChannelInfo, u8Channel))
    {
        HGSMI_STRICT_ASSERT_FAILED();
        return VERR_ALREADY_EXISTS;
    }
    /* Fill in the slot for the new channel. */
    HGSMICHANNEL *pChannel = &pChannelInfo->Channels[u8Channel];
    pChannel->u8Flags            = HGSMI_CH_F_REGISTERED;
    pChannel->u8Channel          = u8Channel;
    pChannel->handler.pfnHandler = pfnChannelHandler;
    pChannel->handler.pvHandler  = pvChannelHandler;
    pChannel->pszName            = pszName;
    return VINF_SUCCESS;
}
/* $Id: HGSMIMemAlloc.cpp $ */
/** @file
* VBox Host Guest Shared Memory Interface (HGSMI) - Memory allocator.
*/
/*
* Copyright (C) 2014 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
/*
* Memory allocator
* ----------------
*
* Area [0; AreaSize) contains only the data, control structures are separate.
* Block sizes are power of 2: 32B, ..., 1MB
* Area size can be anything and will be divided initially to largest possible free blocks.
*
* The entire area is described by a list of 32 bit block descriptors:
* * bits 0..3 - order, which is log2 size of the block - 5: 2^(0+5) ... 2^(15+5) == 32B .. 1MB
* * bit 4 - 1 for free blocks.
* * bits 5..31 - block offset.
*
* 31 ... 5 | 4 | 3 ... 0
* offset F order
*
* There is a sorted collection of all block descriptors
* (key is the block offset, bits 0...4 do not interfere with sorting).
* Also there are lists of free blocks for each size for fast allocation.
*
*
* Implementation
* --------------
*
* The blocks collection is a sorted linear list.
*
* Initially the entire area consists of one or more largest blocks followed by smaller blocks:
* * 100B area - 64B block with descriptor: 0x00000011
* 32B block with descriptor: 0x00000030
* 4B unused
* * 64K area - one 64K block with descriptor: 0x0000001C
* * 512K area - one 512K block with descriptor: 0x0000001F
*
 * When allocating a new block:
 * * larger free blocks are split when there are no smaller free blocks;
 * * smaller free blocks are merged if they can build a requested larger block.
*/
#include <VBox/HGSMI/HGSMIMemAlloc.h>
#include <VBox/HGSMI/HGSMI.h>
#include <iprt/err.h>
#include <iprt/string.h>
/** Pack a block descriptor word: offset in the high bits, the free flag,
 *  and the order (log2 of the block size minus 5) in the low bits. */
DECLINLINE(HGSMIOFFSET) hgsmiMADescriptor(HGSMIOFFSET off, bool fFree, HGSMIOFFSET order)
{
    HGSMIOFFSET descriptor = off & HGSMI_MA_DESC_OFFSET_MASK;
    descriptor |= order & HGSMI_MA_DESC_ORDER_MASK;
    if (fFree)
        descriptor |= HGSMI_MA_DESC_FREE_MASK;
    return descriptor;
}
/** Return a block tracking structure to the environment's allocator. */
static void hgsmiMABlockFree(HGSMIMADATA *pMA, HGSMIMABLOCK *pBlock)
{
    const HGSMIENV *pEnv = &pMA->env;
    pEnv->pfnFree(pEnv->pvEnv, pBlock);
}
/**
 * Allocate a new block tracking structure via the environment callbacks.
 *
 * @returns VINF_SUCCESS and *ppBlock set, or VERR_NO_MEMORY.
 */
static int hgsmiMABlockAlloc(HGSMIMADATA *pMA, HGSMIMABLOCK **ppBlock)
{
    HGSMIMABLOCK *pBlock = (HGSMIMABLOCK *)pMA->env.pfnAlloc(pMA->env.pvEnv, sizeof(HGSMIMABLOCK));
    if (!pBlock)
        return VERR_NO_MEMORY;

    /* Only the list node needs clearing; the descriptor is set by the caller. */
    RT_ZERO(pBlock->nodeBlock);
    *ppBlock = pBlock;
    return VINF_SUCCESS;
}
/**
 * Divide the entire area into free blocks: largest power-of-two blocks
 * first, then progressively smaller ones for the remainder.
 *
 * @returns VINF_SUCCESS or VERR_NO_MEMORY if a tracking structure could
 *          not be allocated (the list built so far is left in place).
 */
static int hgsmiMAFormat(HGSMIMADATA *pMA)
{
    int rc = VINF_SUCCESS;

    /* Start pessimistic; raised below as soon as a bigger block fits. */
    pMA->cbMaxBlock = HGSMI_MA_BLOCK_SIZE_MIN;
    pMA->cBlocks = 0;

    HGSMISIZE cbRemaining = pMA->area.cbArea;
    HGSMIOFFSET off = 0;

    HGSMISIZE cbBlock;
    for (cbBlock = HGSMI_MA_BLOCK_SIZE_MAX; cbBlock >= HGSMI_MA_BLOCK_SIZE_MIN; cbBlock /= 2)
    {
        /* How many blocks of this size still fit into the remainder. */
        uint32_t cBlocks = cbRemaining / cbBlock;
        if (cBlocks > 0)
        {
            if (pMA->cbMaxBlock < cbBlock)
                pMA->cbMaxBlock = cbBlock;

            HGSMIOFFSET order = HGSMIMASize2Order(cbBlock);

            uint32_t i;
            for (i = 0; i < cBlocks; ++i)
            {
                /* Append a new free block covering [off, off + cbBlock). */
                HGSMIMABLOCK *pBlock;
                rc = hgsmiMABlockAlloc(pMA, &pBlock);
                if (RT_FAILURE(rc))
                    break;

                pBlock->descriptor = hgsmiMADescriptor(off, true, order);
                RTListAppend(&pMA->listBlocks, &pBlock->nodeBlock);
                ++pMA->cBlocks;

                off += cbBlock;
                cbRemaining -= cbBlock;
            }
        }

        if (RT_FAILURE(rc))
            break;
    }

    return rc;
}
/**
 * Walk the ordered list of all blocks and append every free block to the
 * free list matching its order.
 *
 * @returns VINF_SUCCESS (cannot currently fail).
 */
static int hgsmiMARebuildFreeLists(HGSMIMADATA *pMA)
{
    HGSMIMABLOCK *pIter;
    RTListForEach(&pMA->listBlocks, pIter, HGSMIMABLOCK, nodeBlock)
    {
        if (!HGSMI_MA_DESC_IS_FREE(pIter->descriptor))
            continue;
        RTListAppend(&pMA->aListFreeBlocks[HGSMI_MA_DESC_ORDER(pIter->descriptor)],
                     &pIter->nodeFree);
    }
    return VINF_SUCCESS;
}
/**
 * Recreate the block list from an array of saved descriptors.
 *
 * Each descriptor is verified against the running offset, the remaining
 * area size and the stated maximum block size before being accepted.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER for a corrupt descriptor,
 *          or VERR_NO_MEMORY.
 */
static int hgsmiMARestore(HGSMIMADATA *pMA, HGSMIOFFSET *paDescriptors, uint32_t cDescriptors, HGSMISIZE cbMaxBlock)
{
    int rc = VINF_SUCCESS;

    pMA->cbMaxBlock = cbMaxBlock;
    pMA->cBlocks = 0;

    HGSMISIZE cbRemaining = pMA->area.cbArea;
    HGSMIOFFSET off = 0;

    uint32_t i;
    for (i = 0; i < cDescriptors; ++i)
    {
        HGSMIOFFSET descriptor = paDescriptors[i];

        /* The saved offset must match the running position and the block
         * must fit in both the remaining space and the declared maximum. */
        HGSMISIZE cbBlock = HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(descriptor));
        if (   HGSMI_MA_DESC_OFFSET(descriptor) != off
            || cbBlock > cbRemaining
            || cbBlock > pMA->cbMaxBlock)
        {
            rc = VERR_INVALID_PARAMETER;
            break;
        }

        /* Accept the descriptor as a new block. */
        HGSMIMABLOCK *pBlock;
        rc = hgsmiMABlockAlloc(pMA, &pBlock);
        if (RT_FAILURE(rc))
            break;

        pBlock->descriptor = descriptor;
        RTListAppend(&pMA->listBlocks, &pBlock->nodeBlock);
        ++pMA->cBlocks;

        off += cbBlock;
        cbRemaining -= cbBlock;
    }

    return rc;
}
/**
 * Find a free block of the given order, splitting a larger free block
 * repeatedly if only bigger ones are available.
 *
 * @returns the free block (still linked on its free list; the caller
 *          removes it), or NULL when no free block is large enough or a
 *          split failed for lack of memory.
 * @param pMA    the allocator instance.
 * @param order  the requested block order.
 */
static HGSMIMABLOCK *hgsmiMAGetFreeBlock(HGSMIMADATA *pMA, HGSMIOFFSET order)
{
    HGSMIMABLOCK *pBlock = NULL;

    /* Scan the free lists from the requested order upwards and take the
     * first (smallest sufficient) non-empty one. */
    HGSMIOFFSET i;
    for (i = order; i < RT_ELEMENTS(pMA->aListFreeBlocks); ++i)
    {
        pBlock = RTListGetFirst(&pMA->aListFreeBlocks[i], HGSMIMABLOCK, nodeFree);
        if (pBlock)
        {
            break;
        }
    }

    if (pBlock)
    {
        AssertReturn(HGSMI_MA_DESC_IS_FREE(pBlock->descriptor), NULL);

        /* Where the block starts. */
        HGSMIOFFSET off = HGSMI_MA_DESC_OFFSET(pBlock->descriptor);

        /* 'i' is the order of the block found; halve until it matches the
         * requested order. */
        while (i != order)
        {
            /* A larger block was found and need to be split to 2 smaller blocks. */
            HGSMIMABLOCK *pBlock2;
            int rc = hgsmiMABlockAlloc(pMA, &pBlock2);
            if (RT_FAILURE(rc))
            {
                /* Could not allocate the second half's tracking structure;
                 * give up (pBlock stays free on its current list). */
                pBlock = NULL;
                break;
            }

            /* Create 2 blocks with decreased order. */
            --i;

            /* Remove from the free list of the old (larger) order. */
            RTListNodeRemove(&pBlock->nodeFree);

            pBlock->descriptor = hgsmiMADescriptor(off, true, i);
            pBlock2->descriptor = hgsmiMADescriptor(off + HGSMIMAOrder2Size(i), true, i);

            /* Update list of all blocks by inserting pBlock2 after pBlock,
             * keeping the list sorted by offset. */
            RTListNodeInsertAfter(&pBlock->nodeBlock, &pBlock2->nodeBlock);
            ++pMA->cBlocks;

            /* Update the free list: both halves go to the smaller order. */
            RTListAppend(&pMA->aListFreeBlocks[i], &pBlock->nodeFree);
            RTListAppend(&pMA->aListFreeBlocks[i], &pBlock2->nodeFree);
        }
    }

    return pBlock;
}
/**
 * Rewrite a consecutive run of free blocks [pStart..pEnd] (cbBlocks bytes)
 * into a new layout whose first block has order maxId, reusing the existing
 * block structures in place and freeing the leftovers.
 *
 * @param pMA      the allocator instance.
 * @param maxId    order of the largest (first) block in the new layout.
 * @param pStart   first free block of the run (inclusive).
 * @param pEnd     last free block of the run (inclusive).
 * @param cbBlocks total byte size of the run.
 */
static void hgsmiMAReformatFreeBlocks(HGSMIMADATA *pMA, HGSMIOFFSET maxId,
                                      HGSMIMABLOCK *pStart, HGSMIMABLOCK *pEnd, HGSMISIZE cbBlocks)
{
    int rc = VINF_SUCCESS;

    /*
     * Blocks starting from pStart until pEnd will be replaced with
     * another set of blocks.
     *
     * The new set will include the block with the required order.
     * Since the required order is larger than any existing block,
     * it will replace at least two existing blocks.
     * The new set will also have minimal possible number of blocks.
     * Therefore the new set will have at least one block less.
     * Blocks will be updated in place and remaining blocks will be
     * deallocated.
     */
    HGSMISIZE u32BlockSize = HGSMIMAOrder2Size(maxId);
    HGSMISIZE cbRemaining = cbBlocks;
    HGSMIOFFSET off = HGSMI_MA_DESC_OFFSET(pStart->descriptor);
    HGSMIMABLOCK *pBlock = pStart;

    /* Largest block size first, same carving scheme as the format pass. */
    while (u32BlockSize >= HGSMI_MA_BLOCK_SIZE_MIN && cbRemaining)
    {
        /* Build a list of free memory blocks with u32BlockSize. */
        uint32_t cBlocks = cbRemaining / u32BlockSize;
        if (cBlocks > 0)
        {
            HGSMIOFFSET order = HGSMIMASize2Order(u32BlockSize);

            uint32_t i;
            for (i = 0; i < cBlocks; ++i)
            {
                if (pBlock == pEnd)
                {
                    /* Should never happen because the new set of blocks is supposed to be smaller. */
                    AssertFailed();
                    rc = VERR_OUT_OF_RESOURCES;
                    break;
                }

                /* Remove from the free list of the block's old order. */
                RTListNodeRemove(&pBlock->nodeFree);

                /* Reuse the structure for the new layout. */
                pBlock->descriptor = hgsmiMADescriptor(off, true, order);

                RTListAppend(&pMA->aListFreeBlocks[order], &pBlock->nodeFree);

                off += u32BlockSize;
                cbRemaining -= u32BlockSize;

                pBlock = RTListGetNext(&pMA->listBlocks, pBlock, HGSMIMABLOCK, nodeBlock);
            }
        }

        if (RT_FAILURE(rc))
        {
            break;
        }

        u32BlockSize /= 2;
    }

    Assert(cbRemaining == 0);

    if (RT_SUCCESS(rc))
    {
        /* Remove remaining free blocks from pBlock until pEnd: the new
         * layout used fewer structures than the old run. */
        for (;;)
        {
            bool fEnd = (pBlock == pEnd);
            HGSMIMABLOCK *pNext = RTListGetNext(&pMA->listBlocks, pBlock, HGSMIMABLOCK, nodeBlock);

            RTListNodeRemove(&pBlock->nodeFree);
            RTListNodeRemove(&pBlock->nodeBlock);
            --pMA->cBlocks;

            hgsmiMABlockFree(pMA, pBlock);

            if (fEnd)
            {
                break;
            }

            pBlock = pNext;
        }
    }
}
/**
 * Determine the run of consecutive free blocks containing pBlock.
 *
 * Scans forward, then backward, in the offset-ordered block list, summing
 * block sizes. If cbRequired is non-zero the scan stops early as soon as
 * the accumulated size reaches it.
 *
 * @param pMA        the allocator instance.
 * @param pBlock     a free block to start from (must be free, asserted).
 * @param cbRequired early-out threshold in bytes; 0 means scan the whole run.
 * @param ppStart    where to return the first block of the run.
 * @param ppEnd      where to return the last block of the run.
 * @param pcbBlocks  where to return the total size of the run in bytes.
 */
static void hgsmiMAQueryFreeRange(HGSMIMADATA *pMA, HGSMIMABLOCK *pBlock, HGSMISIZE cbRequired,
                                  HGSMIMABLOCK **ppStart, HGSMIMABLOCK **ppEnd, HGSMISIZE *pcbBlocks)
{
    Assert(HGSMI_MA_DESC_IS_FREE(pBlock->descriptor));

    /* Start with the run consisting of pBlock alone. */
    *pcbBlocks = HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(pBlock->descriptor));
    *ppStart = pBlock;
    *ppEnd = pBlock;

    HGSMIMABLOCK *p;
    /* Extend the run forward over free neighbours. */
    for (;;)
    {
        p = RTListGetNext(&pMA->listBlocks, *ppEnd, HGSMIMABLOCK, nodeBlock);
        if (!p || !HGSMI_MA_DESC_IS_FREE(p->descriptor))
        {
            break;
        }
        *pcbBlocks += HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(p->descriptor));
        *ppEnd = p;
        if (cbRequired && *pcbBlocks >= cbRequired)
        {
            return;
        }
    }
    /* Extend the run backward over free neighbours. */
    for (;;)
    {
        p = RTListGetPrev(&pMA->listBlocks, *ppStart, HGSMIMABLOCK, nodeBlock);
        if (!p || !HGSMI_MA_DESC_IS_FREE(p->descriptor))
        {
            break;
        }
        *pcbBlocks += HGSMIMAOrder2Size(HGSMI_MA_DESC_ORDER(p->descriptor));
        *ppStart = p;
        if (cbRequired && *pcbBlocks >= cbRequired)
        {
            return;
        }
    }
}
/**
 * Try to create a free block of the given order by coalescing runs of
 * smaller free blocks.
 *
 * @param pMA    the allocator instance.
 * @param order  the order of the block that is needed.
 */
static void hgsmiMAMergeFreeBlocks(HGSMIMADATA *pMA, HGSMIOFFSET order)
{
    /* Try to create a free block with the order from smaller free blocks. */
    if (order == 0)
    {
        /* No smaller blocks. */
        return;
    }

    HGSMISIZE cbRequired = HGSMIMAOrder2Size(order);

    /* Scan all free lists of smaller blocks.
     *
     * Get the sequence of free blocks before and after each free block.
     * If possible, re-split the sequence to get the required block and other free block(s).
     * If not possible, try the next free block.
     *
     * Free blocks are scanned from order-1 down to 0.
     */
    HGSMIOFFSET i = order - 1;
    for (;;)
    {
        HGSMIMABLOCK *pIter;
        RTListForEach(&pMA->aListFreeBlocks[i], pIter, HGSMIMABLOCK, nodeFree)
        {
            Assert(HGSMI_MA_DESC_ORDER(pIter->descriptor) == i);

            /* Find how big a run of consecutive free blocks pIter sits in. */
            HGSMISIZE cbBlocks;
            HGSMIMABLOCK *pFreeStart;
            HGSMIMABLOCK *pFreeEnd;
            hgsmiMAQueryFreeRange(pMA, pIter, cbRequired, &pFreeStart, &pFreeEnd, &cbBlocks);

            Assert((cbBlocks / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN == cbBlocks);

            /* Verify whether cbBlocks is enough for the requested block. */
            if (cbBlocks >= cbRequired)
            {
                /* Build new free blocks starting from the requested order. */
                hgsmiMAReformatFreeBlocks(pMA, order, pFreeStart, pFreeEnd, cbBlocks);
                i = 0; /* Leave the outer loop as well. */
                break;
            }
        }

        if (i == 0)
        {
            break;
        }

        --i;
    }
}
/**
 * Allocate cb bytes from the managed area.
 *
 * @returns offset of the allocated block within the area, or
 *          HGSMIOFFSET_VOID if no suitable free block could be produced.
 */
static HGSMIOFFSET hgsmiMAAlloc(HGSMIMADATA *pMA, HGSMISIZE cb)
{
    if (cb > pMA->cbMaxBlock)
        return HGSMIOFFSET_VOID;

    if (cb < HGSMI_MA_BLOCK_SIZE_MIN)
        cb = HGSMI_MA_BLOCK_SIZE_MIN;

    /* Round the request up to a power of two and derive the block order. */
    HGSMIOFFSET order = HGSMIPopCnt32(cb - 1) - HGSMI_MA_DESC_ORDER_BASE;

    AssertReturn(HGSMIMAOrder2Size(order) >= cb, HGSMIOFFSET_VOID);
    AssertReturn(order < RT_ELEMENTS(pMA->aListFreeBlocks), HGSMIOFFSET_VOID);

    HGSMIMABLOCK *pBlock = hgsmiMAGetFreeBlock(pMA, order);
    if (RT_UNLIKELY(!pBlock))
    {
        /* Nothing big enough; coalesce smaller free blocks and retry once. */
        hgsmiMAMergeFreeBlocks(pMA, order);
        pBlock = hgsmiMAGetFreeBlock(pMA, order);
        if (!pBlock)
            return HGSMIOFFSET_VOID;
    }

    /* Claim the block: off the free list, free bit cleared. */
    RTListNodeRemove(&pBlock->nodeFree);
    pBlock->descriptor &= ~HGSMI_MA_DESC_FREE_MASK;
    return HGSMI_MA_DESC_OFFSET(pBlock->descriptor);
}
/**
 * Mark the block that starts exactly at 'off' as free again.
 * Asserts (and does nothing) if no block starts at that offset.
 */
static void hgsmiMAFree(HGSMIMADATA *pMA, HGSMIOFFSET off)
{
    if (off == HGSMIOFFSET_VOID)
        return;

    /* Valid offsets are always multiples of the minimum block size. */
    Assert((off / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN == off);

    HGSMIMABLOCK *pBlock = HGSMIMASearchOffset(pMA, off);
    if (   pBlock
        && HGSMI_MA_DESC_OFFSET(pBlock->descriptor) == off)
    {
        /* Found the right block: set the free bit and relink it on the
         * free list of its order. */
        pBlock->descriptor |= HGSMI_MA_DESC_FREE_MASK;
        RTListAppend(&pMA->aListFreeBlocks[HGSMI_MA_DESC_ORDER(pBlock->descriptor)],
                     &pBlock->nodeFree);
        return;
    }

    AssertFailed();
}
/**
 * Initialise the memory allocator over the given area.
 *
 * @returns iprt status code.
 * @param pMA           the allocator instance to initialise.
 * @param pArea         the memory area to manage (trimmed to whole blocks).
 * @param paDescriptors saved block descriptors to restore, or NULL.
 * @param cDescriptors  number of saved descriptors; 0 formats from scratch.
 * @param cbMaxBlock    the largest block size, used only when restoring.
 * @param pEnv          environment with the alloc/free callbacks.
 */
int HGSMIMAInit(HGSMIMADATA *pMA, const HGSMIAREA *pArea,
                HGSMIOFFSET *paDescriptors, uint32_t cDescriptors, HGSMISIZE cbMaxBlock,
                const HGSMIENV *pEnv)
{
    AssertReturn(pArea->cbArea < UINT32_C(0x80000000), VERR_INVALID_PARAMETER);
    AssertReturn(pArea->cbArea >= HGSMI_MA_BLOCK_SIZE_MIN, VERR_INVALID_PARAMETER);

    RT_ZERO(*pMA);

    /* Only manage a whole number of minimum-size blocks. */
    HGSMISIZE cb = (pArea->cbArea / HGSMI_MA_BLOCK_SIZE_MIN) * HGSMI_MA_BLOCK_SIZE_MIN;

    int rc = HGSMIAreaInitialize(&pMA->area, pArea->pu8Base, cb, 0);
    if (RT_FAILURE(rc))
        return rc;

    pMA->env = *pEnv;

    uint32_t i;
    for (i = 0; i < RT_ELEMENTS(pMA->aListFreeBlocks); ++i)
        RTListInit(&pMA->aListFreeBlocks[i]);
    RTListInit(&pMA->listBlocks);

    /* Either restore the saved layout or carve the area up anew. */
    rc = cDescriptors
       ? hgsmiMARestore(pMA, paDescriptors, cDescriptors, cbMaxBlock)
       : hgsmiMAFormat(pMA);
    if (RT_SUCCESS(rc))
        rc = hgsmiMARebuildFreeLists(pMA);

    return rc;
}
/** Free every block tracking structure and clear the allocator state. */
void HGSMIMAUninit(HGSMIMADATA *pMA)
{
    HGSMIMABLOCK *pCur;
    HGSMIMABLOCK *pSafe;
    RTListForEachSafe(&pMA->listBlocks, pCur, pSafe, HGSMIMABLOCK, nodeBlock)
    {
        RTListNodeRemove(&pCur->nodeBlock);
        hgsmiMABlockFree(pMA, pCur);
    }
    RT_ZERO(*pMA);
}
/**
 * Convert a pointer inside the managed area to its offset.
 *
 * @returns the offset, or HGSMIOFFSET_VOID (with an assertion) when the
 *          pointer lies outside the area.
 */
HGSMIOFFSET HGSMIMAPointerToOffset(const HGSMIMADATA *pMA, const void *pv)
{
    if (!HGSMIAreaContainsPointer(&pMA->area, pv))
    {
        AssertFailed();
        return HGSMIOFFSET_VOID;
    }
    return HGSMIPointerToOffset(&pMA->area, pv);
}
/**
 * Convert an offset inside the managed area to a pointer.
 *
 * @returns the pointer, or NULL (with an assertion) when the offset lies
 *          outside the area.
 */
void *HGSMIMAOffsetToPointer(const HGSMIMADATA *pMA, HGSMIOFFSET off)
{
    if (!HGSMIAreaContainsOffset(&pMA->area, off))
    {
        AssertFailed();
        return NULL;
    }
    return HGSMIOffsetToPointer(&pMA->area, off);
}
/**
 * Allocate cb bytes from the managed area.
 *
 * @returns pointer to the allocation, or NULL when no free block of a
 *          sufficient size is available.
 */
void *HGSMIMAAlloc(HGSMIMADATA *pMA, HGSMISIZE cb)
{
    HGSMIOFFSET off = hgsmiMAAlloc(pMA, cb);
    /* An exhausted heap is an expected condition: return NULL directly
     * instead of funnelling HGSMIOFFSET_VOID through
     * HGSMIMAOffsetToPointer, which would trip its AssertFailed(). */
    if (off == HGSMIOFFSET_VOID)
        return NULL;
    return HGSMIMAOffsetToPointer(pMA, off);
}
/**
 * Free an allocation previously returned by HGSMIMAAlloc.
 * Asserts when pv does not point into the managed area.
 */
void HGSMIMAFree(HGSMIMADATA *pMA, void *pv)
{
    HGSMIOFFSET off = HGSMIMAPointerToOffset(pMA, pv);
    if (off == HGSMIOFFSET_VOID)
    {
        AssertFailed();
        return;
    }
    hgsmiMAFree(pMA, off);
}
/**
 * Find the block whose descriptor offset is the greatest one that does not
 * exceed 'off'.
 *
 * Performs a binary search over the offset-sorted linked list of all
 * blocks: the index range is halved each round while the middle element is
 * reached by walking the list linearly.
 *
 * @returns the candidate block, or NULL when the list is empty. The caller
 *          must verify that the returned block's offset matches exactly.
 */
HGSMIMABLOCK *HGSMIMASearchOffset(HGSMIMADATA *pMA, HGSMIOFFSET off)
{
    /* Binary search in the block list for the offset. */
    HGSMIMABLOCK *pStart = RTListGetFirst(&pMA->listBlocks, HGSMIMABLOCK, nodeBlock);
    HGSMIMABLOCK *pEnd = RTListGetLast(&pMA->listBlocks, HGSMIMABLOCK, nodeBlock);
    HGSMIMABLOCK *pMiddle;

    uint32_t iStart = 0;
    uint32_t iEnd = pMA->cBlocks;
    uint32_t iMiddle;

    for (;;)
    {
        pMiddle = pStart;
        iMiddle = iStart + (iEnd - iStart) / 2;
        if (iMiddle == iStart)
        {
            /* Range narrowed to one element (or the list was empty). */
            break;
        }

        /* Find the block with the iMiddle index. Never go further than pEnd. */
        uint32_t i;
        for (i = iStart; i < iMiddle && pMiddle != pEnd; ++i)
        {
            pMiddle = RTListNodeGetNext(&pMiddle->nodeBlock, HGSMIMABLOCK, nodeBlock);
        }

        HGSMIOFFSET offMiddle = HGSMI_MA_DESC_OFFSET(pMiddle->descriptor);
        if (offMiddle > off)
        {
            /* Candidate is in the lower half. */
            pEnd = pMiddle;
            iEnd = iMiddle;
        }
        else
        {
            /* Candidate is pMiddle or in the upper half. */
            pStart = pMiddle;
            iStart = iMiddle;
        }
    }

    return pMiddle;
}
/*
 * Helper.
 */
uint32_t HGSMIPopCnt32(uint32_t u32)
{
    /* NOTE: despite the name this does NOT count set bits; it returns the
     * 1-based index of the highest set bit, i.e. the bit length of u32:
     * HGSMIPopCnt32(0) == 0, HGSMIPopCnt32(1) == 1, HGSMIPopCnt32(256) == 9.
     * The name is kept because callers (hgsmiMAAlloc) depend on it. */
    uint32_t cBits = 0;
    while (u32)
    {
        ++cBits;
        u32 >>= 1;
    }
    return cBits;
}
KBUILD_EXTMOD=${srctree}/ubuntu/vbox
# $Id: Makefile.module $
# $Id: Makefile.module.kms $
## @file
# VirtualBox Guest Additions Module Makefile.
#
......@@ -7,7 +7,7 @@ KBUILD_EXTMOD=${srctree}/ubuntu/vbox
#
#
# Copyright (C) 2006-2011 Oracle Corporation
# Copyright (C) 2006-2010 Oracle Corporation
#
# This file is part of VirtualBox Open Source Edition (OSE), as
# available from http://www.virtualbox.org. This file is free software;
......@@ -24,7 +24,14 @@ include $(obj)/Makefile.include.header
MOD_NAME = vboxvideo
MOD_OBJS = vboxvideo_drm.o
ifeq ($(filter 1.% 2.% 3.0.% 3.1.% 3.2.% 3.3.% 3.4.% 3.5.% 3.6.% 3.7.% \
3.8.% 3.9.% 3.10.%,$(KERNELRELEASE)),)
MOD_OBJS = HGSMIBase.o HGSMICommon.o HGSMIMemAlloc.o heapoffset.o \
Modesetting.o vbox_drv.o vbox_fb.o vbox_irq.o vbox_main.o \
vbox_mode.o vbox_ttm.o VBVABase.o
else
MOD_OBJS = vbox_dummy.o
endif
ifneq ($(wildcard $(KBUILD_EXTMOD)/vboxvideo),)
MANGLING := $(KBUILD_EXTMOD)/vboxvideo/include/VBox/VBoxGuestMangling.h
......@@ -32,15 +39,10 @@ else
MANGLING := $(KBUILD_EXTMOD)/include/VBox/VBoxGuestMangling.h
endif
MOD_CFLAGS = -Wno-declaration-after-statement -fshort-wchar -include $(MANGLING)
MOD_INCL = $(addprefix -I$(KBUILD_EXTMOD),/ /include /r0drv/linux)
MOD_INCL = $(addprefix -I$(KBUILD_EXTMOD),/ /include)
# What on earth is this?
MOD_INCL += $(addprefix -I$(KBUILD_EXTMOD)/vboxvideo,/ /include /r0drv/linux)
# Enterprise Linux 6.5 does not include the drm user API headers with the kernel
# headers.
MOD_INCL += $(foreach inc,$(KERN_INCL) include,\
$(if $(wildcard $(inc)/linux/utsrelease.h),\
$(if $(shell grep '"2.6.32.*el6.*"' $(inc)/linux/utsrelease.h),\
-I/usr/include,),))
MOD_INCL += $(addprefix -I$(KBUILD_EXTMOD)/vboxvideo,/ /include)
MOD_INCL += -Iinclude/drm
MOD_DEFS := -DRT_OS_LINUX -DIN_RING0 -DIN_RT_R0 \
-DIN_SUP_R0 -DVBOX -DVBOX_WITH_HGCM -DLOG_TO_BACKDOOR -DIN_MODULE \
-DIN_GUEST_R0
......@@ -54,4 +56,3 @@ endif
MOD_CLEAN = . linux r0drv r0drv/linux
include $(obj)/Makefile.include.footer
/* $Id: Modesetting.cpp $ */
/** @file
* VirtualBox Video driver, common code - HGSMI initialisation and helper
* functions.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include <VBox/VBoxVideoGuest.h>
#include <VBox/VBoxVideo.h>
#include <VBox/VBoxGuest.h>
#include <VBox/Hardware/VBoxVideoVBE.h>
#include <VBox/VMMDev.h>
#include <iprt/asm.h>
#include <iprt/log.h>
#ifndef VBOX_GUESTR3XF86MOD
# include <iprt/string.h>
#endif
/**
 * Gets the count of virtual monitors attached to the guest via an HGSMI
 * command.
 *
 * @returns the reported count on success or 1 on failure.
 * @param pCtx the context containing the heap to use
 */
RTDECL(uint32_t) VBoxHGSMIGetMonitorCount(PHGSMIGUESTCOMMANDCONTEXT pCtx)
{
    uint32_t cDisplays = 0;

    /* Ask the host for the configured number of displays. */
    VBoxQueryConfHGSMI(pCtx, VBOX_VBVA_CONF32_MONITOR_COUNT, &cDisplays);
    LogFunc(("cDisplays = %d\n", cDisplays));

    if (cDisplays == 0 || cDisplays > VBOX_VIDEO_MAX_SCREENS)
    {
        /* The host reported a bogus value; fall back to single-screen mode. */
        cDisplays = 1;
    }
    return cDisplays;
}
/**
 * Returns the size of the video RAM in bytes.
 *
 * @returns the size
 */
RTDECL(uint32_t) VBoxVideoGetVRAMSize(void)
{
    /* A 32bit read on the VBE data port yields the VRAM size. */
    uint32_t cbVRAM = VBoxVideoCmnPortReadUlong(VBE_DISPI_IOPORT_DATA);
    return cbVRAM;
}
/**
 * Check whether this hardware allows the display width to have
 * non-multiple-of-eight values.
 *
 * @returns true if any width is allowed, false otherwise.
 */
RTDECL(bool) VBoxVideoAnyWidthAllowed(void)
{
    /* Select the ID register and probe it with the ANYX capability ID;
     * supporting hardware echoes the ID back. */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_ID);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, VBE_DISPI_ID_ANYX);
    unsigned DispiId = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    return DispiId == VBE_DISPI_ID_ANYX;
}
/**
 * Tell the host about how VRAM is divided up between each screen via an
 * HGSMI command. It is acceptable to specify identical data for each screen
 * if they share a single framebuffer.
 *
 * @returns iprt status code, either VERR_NO_MEMORY or the status returned by
 *          @a pfnFill
 * @todo What was I thinking of with that callback function? It
 *       would be much simpler to just pass in a structure in normal
 *       memory and copy it.
 * @param pCtx     the context containing the heap to use
 * @param u32Count the number of screens we are activating
 * @param pfnFill  a callback which initialises the VBVAINFOVIEW structures
 *                 for all screens
 * @param pvData   context data for @a pfnFill
 */
RTDECL(int) VBoxHGSMISendViewInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                  uint32_t u32Count,
                                  PFNHGSMIFILLVIEWINFO pfnFill,
                                  void *pvData)
{
    /* Issue the screen info command. */
    VBVAINFOVIEW *pInfo =
        (VBVAINFOVIEW *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAINFOVIEW) * u32Count,
                                             HGSMI_CH_VBVA, VBVA_INFO_VIEW);
    if (!pInfo)
        return VERR_NO_MEMORY;

    /* Let the caller fill the view structures, then submit on success. */
    int rc = pfnFill(pvData, pInfo, u32Count);
    if (RT_SUCCESS(rc))
        VBoxHGSMIBufferSubmit(pCtx, pInfo);

    VBoxHGSMIBufferFree(pCtx, pInfo);
    return rc;
}
/**
 * Set a video mode using port registers. This must be done for the first
 * screen before every HGSMI modeset and also works when HGSMI is not enabled.
 *
 * The register writes below follow the VBE DISPI index/data protocol: each
 * value is written by first selecting the register on the INDEX port, then
 * writing the value to the DATA port. The order (geometry first, then
 * enable, then panning) matters to the emulated hardware.
 *
 * @param cWidth the mode width
 * @param cHeight the mode height
 * @param cVirtWidth the mode pitch
 * @param cBPP the colour depth of the mode
 * @param fFlags flags for the mode. These will be or-ed with the
 *               default _ENABLED flag, so unless you are restoring
 *               a saved mode or have special requirements you can pass
 *               zero here.
 * @param cx the horizontal panning offset
 * @param cy the vertical panning offset
 */
RTDECL(void) VBoxVideoSetModeRegisters(uint16_t cWidth, uint16_t cHeight,
                                       uint16_t cVirtWidth, uint16_t cBPP,
                                       uint16_t fFlags, uint16_t cx,
                                       uint16_t cy)
{
    /* set the mode characteristics */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_XRES);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cWidth);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_YRES);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cHeight);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_VIRT_WIDTH);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cVirtWidth);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX, VBE_DISPI_INDEX_BPP);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cBPP);
    /* enable the mode (the _ENABLED flag is always or-ed into fFlags) */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_ENABLE);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA,
                                fFlags | VBE_DISPI_ENABLED);
    /* Panning registers */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_X_OFFSET);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cx);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_Y_OFFSET);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, cy);
    /** @todo read from the port to see if the mode switch was successful */
}
/**
 * Get the video mode for the first screen using the port registers. All
 * parameters are optional.
 *
 * Each value is read using the VBE DISPI protocol: select the register on
 * the INDEX port, then read it from the DATA port.
 *
 * @returns true if the VBE mode returned is active, false if we are in VGA
 *          mode
 * @note If anyone else needs additional register values just extend the
 *       function with additional parameters and fix any existing callers.
 * @param pcWidth     where to store the mode width
 * @param pcHeight    where to store the mode height
 * @param pcVirtWidth where to store the mode pitch
 * @param pcBPP       where to store the colour depth of the mode
 * @param pfFlags     where to store the flags for the mode
 */
RTDECL(bool) VBoxVideoGetModeRegisters(uint16_t *pcWidth, uint16_t *pcHeight,
                                       uint16_t *pcVirtWidth, uint16_t *pcBPP,
                                       uint16_t *pfFlags)
{
    uint16_t fFlags;

    /* Always read the enable register; the return value is derived from it. */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_ENABLE);
    fFlags = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    if (pcWidth)
    {
        VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                    VBE_DISPI_INDEX_XRES);
        *pcWidth = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    }
    if (pcHeight)
    {
        VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                    VBE_DISPI_INDEX_YRES);
        *pcHeight = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    }
    if (pcVirtWidth)
    {
        VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                    VBE_DISPI_INDEX_VIRT_WIDTH);
        *pcVirtWidth = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    }
    if (pcBPP)
    {
        VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                    VBE_DISPI_INDEX_BPP);
        *pcBPP = VBoxVideoCmnPortReadUshort(VBE_DISPI_IOPORT_DATA);
    }
    if (pfFlags)
        *pfFlags = fFlags;
    return RT_BOOL(fFlags & VBE_DISPI_ENABLED);
}
/**
 * Disable our extended graphics mode and go back to VGA mode.
 */
RTDECL(void) VBoxVideoDisableVBE(void)
{
    /* Writing zero flags to the enable register drops back to plain VGA. */
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_INDEX,
                                VBE_DISPI_INDEX_ENABLE);
    VBoxVideoCmnPortWriteUshort(VBE_DISPI_IOPORT_DATA, 0);
}
/**
 * Set a video mode via an HGSMI request. The views must have been
 * initialised first using @a VBoxHGSMISendViewInfo and if the mode is being
 * set on the first display then it must be set first using registers.
 *
 * @param pCtx     the context containing the heap to use
 * @param cDisplay the screen number
 * @param cOriginX the horizontal displacement relative to the first screen
 * @param cOriginY the vertical displacement relative to the first screen
 * @param offStart the offset of the visible area of the framebuffer
 *                 relative to the framebuffer start
 * @param cbPitch  the offset in bytes between the starts of two adjacent
 *                 scan lines in video RAM
 * @param cWidth   the mode width
 * @param cHeight  the mode height
 * @param cBPP     the colour depth of the mode
 * @param fFlags   flags for the mode
 */
RTDECL(void) VBoxHGSMIProcessDisplayInfo(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                         uint32_t cDisplay,
                                         int32_t cOriginX,
                                         int32_t cOriginY,
                                         uint32_t offStart,
                                         uint32_t cbPitch,
                                         uint32_t cWidth,
                                         uint32_t cHeight,
                                         uint16_t cBPP,
                                         uint16_t fFlags)
{
    /* Issue the screen info command. */
    VBVAINFOSCREEN *pScreen =
        (VBVAINFOSCREEN *)VBoxHGSMIBufferAlloc(pCtx,
                                               sizeof (VBVAINFOSCREEN),
                                               HGSMI_CH_VBVA,
                                               VBVA_INFO_SCREEN);
    if (!pScreen)
    {
        LogFunc(("HGSMIHeapAlloc failed\n"));
        return;
    }

    /* Describe the screen geometry for the host. */
    pScreen->u32ViewIndex    = cDisplay;
    pScreen->i32OriginX      = cOriginX;
    pScreen->i32OriginY      = cOriginY;
    pScreen->u32StartOffset  = offStart;
    pScreen->u32LineSize     = cbPitch;
    pScreen->u32Width        = cWidth;
    pScreen->u32Height       = cHeight;
    pScreen->u16BitsPerPixel = cBPP;
    pScreen->u16Flags        = fFlags;

    VBoxHGSMIBufferSubmit(pCtx, pScreen);
    VBoxHGSMIBufferFree(pCtx, pScreen);
}
/** Report the rectangle relative to which absolute pointer events should be
 * expressed. This information remains valid until the next VBVA resize event
 * for any screen, at which time it is reset to the bounding rectangle of all
 * virtual screens.
 * @param pCtx     The context containing the heap to use.
 * @param cOriginX Upper left X co-ordinate relative to the first screen.
 * @param cOriginY Upper left Y co-ordinate relative to the first screen.
 * @param cWidth   Rectangle width.
 * @param cHeight  Rectangle height.
 * @returns iprt status code.
 * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
 */
RTDECL(int) VBoxHGSMIUpdateInputMapping(PHGSMIGUESTCOMMANDCONTEXT pCtx, int32_t cOriginX, int32_t cOriginY,
                                        uint32_t cWidth, uint32_t cHeight)
{
    int rc = VINF_SUCCESS;
    VBVAREPORTINPUTMAPPING *p;
    /* Fixed: the second logged value was (int)cOriginX twice; it must be
     * cOriginY to match the format string. */
    Log(("%s: cOriginX=%d, cOriginY=%d, cWidth=%u, cHeight=%u\n", __PRETTY_FUNCTION__, (int)cOriginX, (int)cOriginY,
         (unsigned)cWidth, (unsigned)cHeight));

    /* Allocate the IO buffer. */
    p = (VBVAREPORTINPUTMAPPING *)VBoxHGSMIBufferAlloc(pCtx, sizeof(VBVAREPORTINPUTMAPPING), HGSMI_CH_VBVA,
                                                       VBVA_REPORT_INPUT_MAPPING);
    if (p)
    {
        /* Prepare data to be sent to the host. */
        p->x  = cOriginX;
        p->y  = cOriginY;
        p->cx = cWidth;
        p->cy = cHeight;
        rc = VBoxHGSMIBufferSubmit(pCtx, p);
        /* Free the IO buffer. */
        VBoxHGSMIBufferFree(pCtx, p);
    }
    else
        rc = VERR_NO_MEMORY;
    LogFunc(("rc = %d\n", rc));
    return rc;
}
/**
 * Get most recent video mode hints.
 *
 * @param pCtx     the context containing the heap to use
 * @param cScreens the number of screens to query hints for, starting at 0.
 * @param paHints  array of VBVAMODEHINT structures for receiving the hints.
 * @returns iprt status code
 * @returns VERR_NO_MEMORY HGSMI heap allocation failed.
 * @returns VERR_NOT_SUPPORTED Host does not support this command.
 */
RTDECL(int) VBoxHGSMIGetModeHints(PHGSMIGUESTCOMMANDCONTEXT pCtx,
                                  unsigned cScreens, VBVAMODEHINT *paHints)
{
    AssertPtrReturn(paHints, VERR_INVALID_POINTER);

    VBVAQUERYMODEHINTS *pQuery =
        (VBVAQUERYMODEHINTS *)VBoxHGSMIBufferAlloc(pCtx,
                                                   sizeof(VBVAQUERYMODEHINTS)
                                                   + cScreens * sizeof(VBVAMODEHINT),
                                                   HGSMI_CH_VBVA, VBVA_QUERY_MODE_HINTS);
    if (!pQuery)
    {
        LogFunc(("HGSMIHeapAlloc failed\n"));
        return VERR_NO_MEMORY;
    }

    pQuery->cHintsQueried        = cScreens;
    pQuery->cbHintStructureGuest = sizeof(VBVAMODEHINT);
    /* The host overwrites this when it supports the command. */
    pQuery->rc                   = VERR_NOT_SUPPORTED;

    VBoxHGSMIBufferSubmit(pCtx, pQuery);
    int rc = pQuery->rc;
    if (RT_SUCCESS(rc))
    {
        /* The hints follow directly after the query header in the buffer. */
        memcpy(paHints, (uint8_t *)pQuery + sizeof(VBVAQUERYMODEHINTS),
               cScreens * sizeof(VBVAMODEHINT));
    }

    VBoxHGSMIBufferFree(pCtx, pQuery);
    return rc;
}
/**
 * Query the supported flags in VBVAINFOSCREEN::u16Flags.
 *
 * @returns The mask of VBVA_SCREEN_F_* flags or 0 if host does not support
 *          the request.
 * @param pCtx the context containing the heap to use
 */
RTDECL(uint16_t) VBoxHGSMIGetScreenFlags(PHGSMIGUESTCOMMANDCONTEXT pCtx)
{
    uint32_t fScreenFlags = 0;
    int rc = VBoxQueryConfHGSMIDef(pCtx, VBOX_VBVA_CONF32_SCREEN_FLAGS, 0, &fScreenFlags);
    LogFunc(("u32Flags = 0x%x rc %Rrc\n", fScreenFlags, rc));
    /* On failure report no supported flags at all. */
    return RT_SUCCESS(rc) ? (uint16_t)fScreenFlags : 0;
}
/* $Id: VBVABase.cpp $ */
/** @file
* VirtualBox Video driver, common code - VBVA initialisation and helper
* functions.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include <VBox/VBoxVideoGuest.h>
#include <VBox/VBoxVideo.h>
#include <VBox/err.h>
#include <VBox/log.h>
#include <iprt/assert.h>
#include <iprt/string.h>
/*
* There is a hardware ring buffer in the graphics device video RAM, formerly
* in the VBox VMMDev PCI memory space.
* All graphics commands go there serialized by VBoxVBVABufferBeginUpdate.
* and vboxHwBufferEndUpdate.
*
* off32Free is writing position. off32Data is reading position.
* off32Free == off32Data means buffer is empty.
* There must be always gap between off32Data and off32Free when data
* are in the buffer.
* Guest only changes off32Free, host changes off32Data.
*/
/* Forward declarations of internal functions. */
static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx);
static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
uint32_t cb, uint32_t offset);
static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
const void *p, uint32_t cb);
/**
 * Send a VBVA_ENABLE command telling the host to enable or disable VBVA for
 * a screen.
 *
 * @returns true on success. Disabling is always considered successful once
 *          the command could be submitted; enabling depends on the host's
 *          reported result.
 */
static bool vboxVBVAInformHost(PVBVABUFFERCONTEXT pCtx,
                               PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                               int32_t cScreen, bool bEnable)
{
    VBVAENABLE_EX *pEnable =
        (VBVAENABLE_EX *)VBoxHGSMIBufferAlloc(pHGSMICtx,
                                              sizeof (VBVAENABLE_EX),
                                              HGSMI_CH_VBVA,
                                              VBVA_ENABLE);
    if (!pEnable)
    {
        LogFunc(("HGSMIHeapAlloc failed\n"));
        return false;
    }

    pEnable->Base.u32Flags  = bEnable ? VBVA_F_ENABLE : VBVA_F_DISABLE;
    pEnable->Base.u32Offset = pCtx->offVRAMBuffer;
    pEnable->Base.i32Result = VERR_NOT_SUPPORTED;
    if (cScreen >= 0)
    {
        /* Address a specific screen using the extended form. */
        pEnable->Base.u32Flags |= VBVA_F_EXTENDED | VBVA_F_ABSOFFSET;
        pEnable->u32ScreenId    = cScreen;
    }

    VBoxHGSMIBufferSubmit(pHGSMICtx, pEnable);

    /* Only enabling can be refused by the host. */
    bool fSuccess = bEnable ? RT_SUCCESS(pEnable->Base.i32Result) : true;

    VBoxHGSMIBufferFree(pHGSMICtx, pEnable);
    return fSuccess;
}
/*
* Public hardware buffer methods.
*/
/**
 * Initialise the VBVA ring buffer and ask the host to enable VBVA for the
 * given screen. On failure the context is rolled back via VBoxVBVADisable.
 *
 * @returns true if the host accepted the enable request.
 */
RTDECL(bool) VBoxVBVAEnable(PVBVABUFFERCONTEXT pCtx,
                            PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                            VBVABUFFER *pVBVA, int32_t cScreen)
{
    LogFlowFunc(("pVBVA %p\n", pVBVA));
    LogFunc(("pVBVA %p vbva off 0x%x\n", pVBVA, pCtx->offVRAMBuffer));

    /* Reset the shared ring buffer to its empty state. */
    pVBVA->hostFlags.u32HostEvents      = 0;
    pVBVA->hostFlags.u32SupportedOrders = 0;
    pVBVA->off32Data = 0;
    pVBVA->off32Free = 0;
    memset(pVBVA->aRecords, 0, sizeof (pVBVA->aRecords));
    pVBVA->indexRecordFirst = 0;
    pVBVA->indexRecordFree  = 0;
    pVBVA->cbPartialWriteThreshold = 256;
    pVBVA->cbData = pCtx->cbBuffer - sizeof (VBVABUFFER) + sizeof (pVBVA->au8Data);

    /* Reset the guest-side context and attach the buffer. */
    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;
    pCtx->pVBVA   = pVBVA;

    bool fEnabled = vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, true);
    if (!fEnabled)
    {
        /* Roll back so the context is left in a consistent disabled state. */
        VBoxVBVADisable(pCtx, pHGSMICtx, cScreen);
    }
    return fEnabled;
}
/** Detach the VBVA buffer from the context and tell the host to disable
 *  VBVA for the given screen. */
RTDECL(void) VBoxVBVADisable(PVBVABUFFERCONTEXT pCtx,
                             PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                             int32_t cScreen)
{
    LogFlowFunc(("\n"));

    /* Detach locally first so no further guest writes can happen. */
    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;
    pCtx->pVBVA   = NULL;

    vboxVBVAInformHost(pCtx, pHGSMICtx, cScreen, false);
}
/**
 * Open a new VBVA record for writing drawing commands.
 *
 * Fails (returns false) when VBVA is not attached/enabled or when the
 * record queue is full even after a flush.
 *
 * @returns true when a record was successfully reserved; the caller must
 *          later close it with VBoxVBVABufferEndUpdate.
 */
RTDECL(bool) VBoxVBVABufferBeginUpdate(PVBVABUFFERCONTEXT pCtx,
                                       PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx)
{
    bool bRc = false;

    // LogFunc(("flags = 0x%08X\n", pCtx->pVBVA? pCtx->pVBVA->u32HostEvents: -1));

    if (   pCtx->pVBVA
        && (pCtx->pVBVA->hostFlags.u32HostEvents & VBVA_F_MODE_ENABLED))
    {
        uint32_t indexRecordNext;

        /* A previous update must have been closed cleanly. */
        Assert(!pCtx->fHwBufferOverflow);
        Assert(pCtx->pRecord == NULL);

        indexRecordNext = (pCtx->pVBVA->indexRecordFree + 1) % VBVA_MAX_RECORDS;

        if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
        {
            /* All slots in the records queue are used. Flushing lets the
             * host consume records, which advances indexRecordFirst. */
            vboxHwBufferFlush (pHGSMICtx);
        }

        if (indexRecordNext == pCtx->pVBVA->indexRecordFirst)
        {
            /* Even after flush there is no place. Fail the request. */
            LogFunc(("no space in the queue of records!!! first %d, last %d\n",
                     pCtx->pVBVA->indexRecordFirst, pCtx->pVBVA->indexRecordFree));
        }
        else
        {
            /* Initialize the record; PARTIAL tells the host it is still
             * being written. */
            VBVARECORD *pRecord = &pCtx->pVBVA->aRecords[pCtx->pVBVA->indexRecordFree];

            pRecord->cbRecord = VBVA_F_RECORD_PARTIAL;

            pCtx->pVBVA->indexRecordFree = indexRecordNext;

            // LogFunc(("indexRecordNext = %d\n", indexRecordNext));

            /* Remember which record we are using. */
            pCtx->pRecord = pRecord;

            bRc = true;
        }
    }

    return bRc;
}
/** Close the record opened by VBoxVBVABufferBeginUpdate, marking it
 *  complete so the host may process it. */
RTDECL(void) VBoxVBVABufferEndUpdate(PVBVABUFFERCONTEXT pCtx)
{
    // LogFunc(("\n"));

    Assert(pCtx->pVBVA);

    VBVARECORD *pRecord = pCtx->pRecord;
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));

    /* Clearing the PARTIAL flag marks the record completed. */
    pRecord->cbRecord &= ~VBVA_F_RECORD_PARTIAL;

    pCtx->fHwBufferOverflow = false;
    pCtx->pRecord = NULL;
}
/*
* Private operations.
*/
/** Free space in the ring buffer: the gap from the write position
 *  (off32Free) up to the read position (off32Data), wrapping around. */
static uint32_t vboxHwBufferAvail (const VBVABUFFER *pVBVA)
{
    int32_t cbGap = pVBVA->off32Data - pVBVA->off32Free;
    if (cbGap > 0)
        return cbGap;
    return pVBVA->cbData + cbGap;
}
/** Submit a VBVA_FLUSH command asking the host to process buffered data. */
static void vboxHwBufferFlush(PHGSMIGUESTCOMMANDCONTEXT pCtx)
{
    VBVAFLUSH *pFlush = (VBVAFLUSH *)VBoxHGSMIBufferAlloc(pCtx,
                                                          sizeof (VBVAFLUSH),
                                                          HGSMI_CH_VBVA,
                                                          VBVA_FLUSH);
    if (!pFlush)
    {
        LogFunc(("HGSMIHeapAlloc failed\n"));
        return;
    }

    pFlush->u32Reserved = 0;
    VBoxHGSMIBufferSubmit(pCtx, pFlush);
    VBoxHGSMIBufferFree(pCtx, pFlush);
}
/**
 * Copies cb bytes from p into the circular VBVA data buffer at the given
 * offset, splitting the copy in two when it wraps past the buffer end.
 *
 * @param   pCtx    The VBVA buffer context.
 * @param   p       Source data.
 * @param   cb      Number of bytes to copy.
 * @param   offset  Destination offset inside the data buffer.
 */
static void vboxHwBufferPlaceDataAt(PVBVABUFFERCONTEXT pCtx, const void *p,
                                    uint32_t cb, uint32_t offset)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    uint32_t cbTillEnd = pVBVA->cbData - offset;
    uint8_t *pu8Dst = &pVBVA->au8Data[offset];

    if (cb <= cbTillEnd)
    {
        /* The chunk fits without crossing the buffer boundary. */
        memcpy (pu8Dst, p, cb);
    }
    else
    {
        /* The chunk wraps: tail part first, remainder at the start. */
        memcpy (pu8Dst, p, cbTillEnd);
        memcpy (&pVBVA->au8Data[0], (uint8_t *)p + cbTillEnd, cb - cbTillEnd);
    }
}
/**
 * Writes @a cb bytes from @a p into the VBVA ring buffer, flushing to the
 * host as necessary to make room, and accounts the bytes to the currently
 * open record.
 *
 * @returns true on success, false if VBVA is not set up or the buffer
 *          overflowed (the overflow condition is latched in the context).
 * @param   pCtx        The VBVA buffer context (must have an open record).
 * @param   pHGSMICtx   The HGSMI guest command context used for flushing.
 * @param   p           Source data.
 * @param   cb          Number of bytes to write.
 */
static bool vboxHwBufferWrite(PVBVABUFFERCONTEXT pCtx,
                              PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                              const void *p, uint32_t cb)
{
    VBVARECORD *pRecord;
    uint32_t cbHwBufferAvail;
    uint32_t cbWritten = 0;
    VBVABUFFER *pVBVA = pCtx->pVBVA;
    Assert(pVBVA);
    /* Refuse to write when VBVA is not set up or a previous write already
       overflowed the buffer. */
    if (!pVBVA || pCtx->fHwBufferOverflow)
    {
        return false;
    }
    Assert(pVBVA->indexRecordFirst != pVBVA->indexRecordFree);
    pRecord = pCtx->pRecord;
    /* A record must have been opened by VBoxVBVABufferBeginUpdate(). */
    Assert(pRecord && (pRecord->cbRecord & VBVA_F_RECORD_PARTIAL));
    // LogFunc(("%d\n", cb));
    cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
    while (cb > 0)
    {
        uint32_t cbChunk = cb;
        // LogFunc(("pVBVA->off32Free %d, pRecord->cbRecord 0x%08X, cbHwBufferAvail %d, cb %d, cbWritten %d\n",
        //             pVBVA->off32Free, pRecord->cbRecord, cbHwBufferAvail, cb, cbWritten));
        /* Not enough room: ask the host to consume data, then re-check. */
        if (cbChunk >= cbHwBufferAvail)
        {
            LogFunc(("1) avail %d, chunk %d\n", cbHwBufferAvail, cbChunk));
            vboxHwBufferFlush (pHGSMICtx);
            cbHwBufferAvail = vboxHwBufferAvail (pVBVA);
            if (cbChunk >= cbHwBufferAvail)
            {
                LogFunc(("no place for %d bytes. Only %d bytes available after flush. Going to partial writes.\n",
                            cb, cbHwBufferAvail));
                /* Even after the flush we are at or below the partial-write
                   threshold: give up and latch the overflow condition. */
                if (cbHwBufferAvail <= pVBVA->cbPartialWriteThreshold)
                {
                    LogFunc(("Buffer overflow!!!\n"));
                    pCtx->fHwBufferOverflow = true;
                    Assert(false);
                    return false;
                }
                /* Write only what fits, keeping the threshold in reserve. */
                cbChunk = cbHwBufferAvail - pVBVA->cbPartialWriteThreshold;
            }
        }
        Assert(cbChunk <= cb);
        Assert(cbChunk <= vboxHwBufferAvail (pVBVA));
        vboxHwBufferPlaceDataAt (pCtx, (uint8_t *)p + cbWritten, cbChunk, pVBVA->off32Free);
        /* Advance the circular write cursor and account the chunk. */
        pVBVA->off32Free = (pVBVA->off32Free + cbChunk) % pVBVA->cbData;
        pRecord->cbRecord += cbChunk;
        cbHwBufferAvail -= cbChunk;
        cb -= cbChunk;
        cbWritten += cbChunk;
    }
    return true;
}
/*
 * Public writer to the hardware buffer.
 */

/**
 * Appends @a cb bytes at @a pv to the currently open VBVA record.
 * Thin public wrapper around vboxHwBufferWrite().
 *
 * @returns true on success, false on overflow or when VBVA is not set up.
 */
RTDECL(bool) VBoxVBVAWrite(PVBVABUFFERCONTEXT pCtx,
                           PHGSMIGUESTCOMMANDCONTEXT pHGSMICtx,
                           const void *pv, uint32_t cb)
{
    return vboxHwBufferWrite (pCtx, pHGSMICtx, pv, cb);
}
/**
 * Checks whether the host advertises support for the given VBVA order code.
 *
 * @returns true if the order is supported, false otherwise (including when
 *          VBVA is not set up or @a code is out of range).
 * @param   pCtx    The VBVA buffer context.
 * @param   code    The order code to test; a bit index into the 32-bit
 *                  u32SupportedOrders mask, so it must be below 32.
 */
RTDECL(bool) VBoxVBVAOrderSupported(PVBVABUFFERCONTEXT pCtx, unsigned code)
{
    VBVABUFFER *pVBVA = pCtx->pVBVA;

    if (!pVBVA)
    {
        return false;
    }

    /* Guard the shift: u32SupportedOrders has only 32 bits and shifting a
       32-bit value by 32 or more is undefined behaviour in C. */
    if (code >= 32)
    {
        return false;
    }

    if (pVBVA->hostFlags.u32SupportedOrders & (UINT32_C(1) << code))
    {
        return true;
    }

    return false;
}
/**
 * Records the VRAM location and size of the VBVA buffer in the context.
 *
 * @param   pCtx            The VBVA buffer context to set up.
 * @param   offVRAMBuffer   Offset of the buffer within guest VRAM.
 * @param   cbBuffer        Size of the buffer in bytes.
 */
RTDECL(void) VBoxVBVASetupBufferContext(PVBVABUFFERCONTEXT pCtx,
                                        uint32_t offVRAMBuffer,
                                        uint32_t cbBuffer)
{
    /* The two fields are independent; just store them. */
    pCtx->cbBuffer      = cbBuffer;
    pCtx->offVRAMBuffer = offVRAMBuffer;
}
/* $Id: heapoffset.cpp $ */
/** @file
* IPRT - An Offset Based Heap.
*/
/*
* Copyright (C) 2006-2015 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*
* The contents of this file may alternatively be used under the terms
* of the Common Development and Distribution License Version 1.0
* (CDDL) only, as it comes in the "COPYING.CDDL" file of the
* VirtualBox OSE distribution, in which case the provisions of the
* CDDL are applicable instead of those of the GPL.
*
* You may elect to license modified versions of this file under the
* terms and conditions of either the GPL or the CDDL or both.
*/
/*********************************************************************************************************************************
* Header Files *
*********************************************************************************************************************************/
#define LOG_GROUP RTLOGGROUP_DEFAULT
#include <iprt/heap.h>
#include "internal/iprt.h"
#include <iprt/assert.h>
#include <iprt/asm.h>
#include <iprt/err.h>
#include <iprt/log.h>
#include <iprt/param.h>
#include <iprt/string.h>
#include "internal/magics.h"
/*********************************************************************************************************************************
* Structures and Typedefs *
*********************************************************************************************************************************/
/** Pointer to the heap anchor block. */
typedef struct RTHEAPOFFSETINTERNAL *PRTHEAPOFFSETINTERNAL;
/** Pointer to a heap block. */
typedef struct RTHEAPOFFSETBLOCK *PRTHEAPOFFSETBLOCK;
/** Pointer to a free heap block. */
typedef struct RTHEAPOFFSETFREE *PRTHEAPOFFSETFREE;
/**
* Structure describing a block in an offset based heap.
*
* If this block is allocated, it is followed by the user data.
* If this block is free, see RTHEAPOFFSETFREE.
*/
typedef struct RTHEAPOFFSETBLOCK
{
/** The next block in the global block list. */
uint32_t /*PRTHEAPOFFSETBLOCK*/ offNext;
/** The previous block in the global block list. */
uint32_t /*PRTHEAPOFFSETBLOCK*/ offPrev;
/** Offset into the heap of this block. Used to locate the anchor block. */
uint32_t /*PRTHEAPOFFSETINTERNAL*/ offSelf;
/** Flags + magic. */
uint32_t fFlags;
} RTHEAPOFFSETBLOCK;
AssertCompileSize(RTHEAPOFFSETBLOCK, 16);
/** The block is free if this flag is set. When cleared it's allocated. */
#define RTHEAPOFFSETBLOCK_FLAGS_FREE (RT_BIT_32(0))
/** The magic value. */
#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC (UINT32_C(0xabcdef00))
/** The mask that needs to be applied to RTHEAPOFFSETBLOCK::fFlags to obtain the magic value. */
#define RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK (~RT_BIT_32(0))
/**
* Checks if the specified block is valid or not.
* @returns boolean answer.
* @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
*/
#define RTHEAPOFFSETBLOCK_IS_VALID(pBlock) \
( ((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK) == RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
/**
* Checks if the specified block is valid and in use.
* @returns boolean answer.
* @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
*/
#define RTHEAPOFFSETBLOCK_IS_VALID_USED(pBlock) \
( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
== RTHEAPOFFSETBLOCK_FLAGS_MAGIC )
/**
* Checks if the specified block is valid and free.
* @returns boolean answer.
* @param pBlock Pointer to a RTHEAPOFFSETBLOCK structure.
*/
#define RTHEAPOFFSETBLOCK_IS_VALID_FREE(pBlock) \
( ((pBlock)->fFlags & (RTHEAPOFFSETBLOCK_FLAGS_MAGIC_MASK | RTHEAPOFFSETBLOCK_FLAGS_FREE)) \
== (RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE) )
/**
* Checks if the specified block is free or not.
* @returns boolean answer.
* @param pBlock Pointer to a valid RTHEAPOFFSETBLOCK structure.
*/
#define RTHEAPOFFSETBLOCK_IS_FREE(pBlock) (!!((pBlock)->fFlags & RTHEAPOFFSETBLOCK_FLAGS_FREE))
/**
* A free heap block.
* This is an extended version of RTHEAPOFFSETBLOCK that takes the unused
* user data to store free list pointers and a cached size value.
*/
typedef struct RTHEAPOFFSETFREE
{
/** Core stuff. */
RTHEAPOFFSETBLOCK Core;
/** Pointer to the next free block. */
uint32_t /*PRTHEAPOFFSETFREE*/ offNext;
/** Pointer to the previous free block. */
uint32_t /*PRTHEAPOFFSETFREE*/ offPrev;
/** The size of the block (excluding the RTHEAPOFFSETBLOCK part). */
uint32_t cb;
/** An alignment filler to make it a multiple of 16 bytes. */
uint32_t Alignment;
} RTHEAPOFFSETFREE;
AssertCompileSize(RTHEAPOFFSETFREE, 16+16);
/**
* The heap anchor block.
* This structure is placed at the head of the memory block specified to RTHeapOffsetInit(),
* which means that the first RTHEAPOFFSETBLOCK appears immediately after this structure.
*/
typedef struct RTHEAPOFFSETINTERNAL
{
/** The typical magic (RTHEAPOFFSET_MAGIC). */
uint32_t u32Magic;
/** The heap size. (This structure is included!) */
uint32_t cbHeap;
/** The amount of free memory in the heap. */
uint32_t cbFree;
/** Free head pointer. */
uint32_t /*PRTHEAPOFFSETFREE*/ offFreeHead;
/** Free tail pointer. */
uint32_t /*PRTHEAPOFFSETFREE*/ offFreeTail;
/** Make the size of this structure 32 bytes. */
uint32_t au32Alignment[3];
} RTHEAPOFFSETINTERNAL;
AssertCompileSize(RTHEAPOFFSETINTERNAL, 32);
/** The minimum allocation size. */
#define RTHEAPOFFSET_MIN_BLOCK (sizeof(RTHEAPOFFSETBLOCK))
AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETBLOCK));
AssertCompile(RTHEAPOFFSET_MIN_BLOCK >= sizeof(RTHEAPOFFSETFREE) - sizeof(RTHEAPOFFSETBLOCK));
/** The minimum and default alignment. */
#define RTHEAPOFFSET_ALIGNMENT (sizeof(RTHEAPOFFSETBLOCK))
/*********************************************************************************************************************************
* Defined Constants And Macros *
*********************************************************************************************************************************/
#ifdef RT_STRICT
# define RTHEAPOFFSET_STRICT 1
#endif
/**
* Converts RTHEAPOFFSETBLOCK::offSelf into a heap anchor block pointer.
*
* @returns Pointer of given type.
* @param pBlock The block to find the heap anchor block for.
*/
#define RTHEAPOFF_GET_ANCHOR(pBlock) ( (PRTHEAPOFFSETINTERNAL)((uint8_t *)(pBlock) - (pBlock)->offSelf ) )
/**
* Converts an offset to a pointer.
*
* All offsets are relative to the heap to make life simple.
*
* @returns Pointer of given type.
* @param pHeapInt Pointer to the heap anchor block.
* @param off The offset to convert.
* @param type The desired type.
*/
#ifdef RTHEAPOFFSET_STRICT
# define RTHEAPOFF_TO_PTR_N(pHeapInt, off, type) ( (type)rtHeapOffCheckedOffToPtr(pHeapInt, off, true /*fNull*/) )
#else
# define RTHEAPOFF_TO_PTR_N(pHeapInt, off, type) ( (type)((off) ? (uint8_t *)(pHeapInt) + (off) : NULL) )
#endif
/**
* Converts an offset to a pointer.
*
* All offsets are relative to the heap to make life simple.
*
* @returns Pointer of given type.
* @param pHeapInt Pointer to the heap anchor block.
* @param off The offset to convert.
* @param type The desired type.
*/
#ifdef RTHEAPOFFSET_STRICT
# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)rtHeapOffCheckedOffToPtr(pHeapInt, off, false /*fNull*/) )
#else
# define RTHEAPOFF_TO_PTR(pHeapInt, off, type) ( (type)((uint8_t *)(pHeapInt) + (off)) )
#endif
/**
* Converts a pointer to an offset.
*
* All offsets are relative to the heap to make life simple.
*
* @returns Offset into the heap.
* @param pHeapInt Pointer to the heap anchor block.
* @param ptr The pointer to convert.
*/
#ifdef RTHEAPOFFSET_STRICT
# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) rtHeapOffCheckedPtrToOff(pHeapInt, ptr)
#else
# define RTHEAPOFF_TO_OFF(pHeapInt, ptr) ( (uint32_t)((ptr) ? (uintptr_t)(ptr) - (uintptr_t)(pHeapInt) : UINT32_C(0)) )
#endif
#define ASSERT_L(a, b) AssertMsg((a) < (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_LE(a, b) AssertMsg((a) <= (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_G(a, b) AssertMsg((a) > (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_GE(a, b) AssertMsg((a) >= (b), ("a=%08x b=%08x\n", (a), (b)))
#define ASSERT_ALIGN(a) AssertMsg(!((uintptr_t)(a) & (RTHEAPOFFSET_ALIGNMENT - 1)), ("a=%p\n", (uintptr_t)(a)))
#define ASSERT_PREV(pHeapInt, pBlock) \
do { ASSERT_ALIGN((pBlock)->offPrev); \
if ((pBlock)->offPrev) \
{ \
ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
ASSERT_GE((pBlock)->offPrev, sizeof(RTHEAPOFFSETINTERNAL)); \
} \
else \
Assert((pBlock) == (PRTHEAPOFFSETBLOCK)((pHeapInt) + 1)); \
} while (0)
#define ASSERT_NEXT(pHeap, pBlock) \
do { ASSERT_ALIGN((pBlock)->offNext); \
if ((pBlock)->offNext) \
{ \
ASSERT_L((pBlock)->offNext, (pHeapInt)->cbHeap); \
ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
} \
} while (0)
#define ASSERT_BLOCK(pHeapInt, pBlock) \
do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID(pBlock), ("%#x\n", (pBlock)->fFlags)); \
AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
ASSERT_NEXT(pHeapInt, pBlock); \
ASSERT_PREV(pHeapInt, pBlock); \
} while (0)
#define ASSERT_BLOCK_USED(pHeapInt, pBlock) \
do { AssertMsg(RTHEAPOFFSETBLOCK_IS_VALID_USED((pBlock)), ("%#x\n", (pBlock)->fFlags)); \
AssertMsg(RTHEAPOFF_GET_ANCHOR(pBlock) == (pHeapInt), ("%p != %p\n", RTHEAPOFF_GET_ANCHOR(pBlock), (pHeapInt))); \
ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), sizeof(RTHEAPOFFSETINTERNAL)); \
ASSERT_L( RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->cbHeap); \
ASSERT_NEXT(pHeapInt, pBlock); \
ASSERT_PREV(pHeapInt, pBlock); \
} while (0)
#define ASSERT_FREE_PREV(pHeapInt, pBlock) \
do { ASSERT_ALIGN((pBlock)->offPrev); \
if ((pBlock)->offPrev) \
{ \
ASSERT_GE((pBlock)->offPrev, (pHeapInt)->offFreeHead); \
ASSERT_L((pBlock)->offPrev, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
ASSERT_LE((pBlock)->offPrev, (pBlock)->Core.offPrev); \
} \
else \
Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeHead, PRTHEAPOFFSETFREE) ); \
} while (0)
#define ASSERT_FREE_NEXT(pHeapInt, pBlock) \
do { ASSERT_ALIGN((pBlock)->offNext); \
if ((pBlock)->offNext) \
{ \
ASSERT_LE((pBlock)->offNext, (pHeapInt)->offFreeTail); \
ASSERT_G((pBlock)->offNext, RTHEAPOFF_TO_OFF(pHeapInt, pBlock)); \
ASSERT_GE((pBlock)->offNext, (pBlock)->Core.offNext); \
} \
else \
Assert((pBlock) == RTHEAPOFF_TO_PTR(pHeapInt, (pHeapInt)->offFreeTail, PRTHEAPOFFSETFREE)); \
} while (0)
#ifdef RTHEAPOFFSET_STRICT
# define ASSERT_FREE_CB(pHeapInt, pBlock) \
do { size_t cbCalc = ((pBlock)->Core.offNext ? (pBlock)->Core.offNext : (pHeapInt)->cbHeap) \
- RTHEAPOFF_TO_OFF((pHeapInt), (pBlock)) - sizeof(RTHEAPOFFSETBLOCK); \
AssertMsg((pBlock)->cb == cbCalc, ("cb=%#zx cbCalc=%#zx\n", (pBlock)->cb, cbCalc)); \
} while (0)
#else
# define ASSERT_FREE_CB(pHeapInt, pBlock) do {} while (0)
#endif
/** Asserts that a free block is valid. */
#define ASSERT_BLOCK_FREE(pHeapInt, pBlock) \
do { ASSERT_BLOCK(pHeapInt, &(pBlock)->Core); \
Assert(RTHEAPOFFSETBLOCK_IS_VALID_FREE(&(pBlock)->Core)); \
ASSERT_GE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeHead); \
ASSERT_LE(RTHEAPOFF_TO_OFF(pHeapInt, pBlock), (pHeapInt)->offFreeTail); \
ASSERT_FREE_NEXT(pHeapInt, pBlock); \
ASSERT_FREE_PREV(pHeapInt, pBlock); \
ASSERT_FREE_CB(pHeapInt, pBlock); \
} while (0)
/** Asserts that the heap anchor block is ok. */
#define ASSERT_ANCHOR(pHeapInt) \
do { AssertPtr(pHeapInt);\
Assert((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC); \
} while (0)
/*********************************************************************************************************************************
* Internal Functions *
*********************************************************************************************************************************/
#ifdef RTHEAPOFFSET_STRICT
static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt);
#endif
static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment);
static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock);
#ifdef RTHEAPOFFSET_STRICT
/** Checked version of RTHEAPOFF_TO_PTR and RTHEAPOFF_TO_PTR_N.
 * Converts a heap-relative offset to a pointer, asserting that the offset
 * lies within the heap (past the anchor block, before the heap end).
 * @param fNull  Whether a zero offset (encoding NULL) is permitted. */
DECLINLINE(void *) rtHeapOffCheckedOffToPtr(PRTHEAPOFFSETINTERNAL pHeapInt, uint32_t off, bool fNull)
{
    /* A zero offset encodes NULL and is only legal when fNull says so. */
    Assert(off || fNull);
    if (!off)
        return NULL;
    AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
    AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
    return (uint8_t *)pHeapInt + off;
}
/** Checked version of RTHEAPOFF_TO_OFF.
 * Converts a pointer back to a heap-relative offset, asserting that the
 * pointer lies within the heap.  NULL maps to offset 0. */
DECLINLINE(uint32_t) rtHeapOffCheckedPtrToOff(PRTHEAPOFFSETINTERNAL pHeapInt, void *pv)
{
    if (!pv)
        return 0;
    uintptr_t off = (uintptr_t)pv - (uintptr_t)pHeapInt;
    AssertMsg(off < pHeapInt->cbHeap, ("%#x %#x\n", off, pHeapInt->cbHeap));
    AssertMsg(off >= sizeof(*pHeapInt), ("%#x %#x\n", off, sizeof(*pHeapInt)));
    return (uint32_t)off;
}
#endif /* RTHEAPOFFSET_STRICT */
/**
 * Initializes an offset-based heap inside the caller-supplied memory chunk.
 *
 * The anchor block is placed (32-byte aligned) at the start of the chunk and
 * the remainder becomes one single free block.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_PARAMETER / VERR_INVALID_POINTER on
 *          bad input.
 * @param   phHeap      Where to store the resulting heap handle.
 * @param   pvMemory    The memory to place the heap in.
 * @param   cbMemory    Size of the memory chunk (at least PAGE_SIZE, below 4G).
 */
RTDECL(int) RTHeapOffsetInit(PRTHEAPOFFSET phHeap, void *pvMemory, size_t cbMemory)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;
    PRTHEAPOFFSETFREE pFree;
    unsigned i;

    /*
     * Validate input. The imposed minimum heap size is just a convenient value.
     */
    AssertReturn(cbMemory >= PAGE_SIZE, VERR_INVALID_PARAMETER);
    AssertReturn(cbMemory < UINT32_MAX, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pvMemory, VERR_INVALID_POINTER);
    /* NOTE(review): presumably an address-arithmetic overflow sanity check;
       the comparison against cbMemory rather than an end address looks odd —
       confirm against upstream IPRT. */
    AssertReturn((uintptr_t)pvMemory + (cbMemory - 1) > (uintptr_t)cbMemory, VERR_INVALID_PARAMETER);

    /*
     * Place the heap anchor block at the start of the heap memory,
     * enforce 32 byte alignment of it. Also align the heap size correctly.
     */
    pHeapInt = (PRTHEAPOFFSETINTERNAL)pvMemory;
    if ((uintptr_t)pvMemory & 31)
    {
        const uintptr_t off = 32 - ((uintptr_t)pvMemory & 31);
        cbMemory -= off;
        pHeapInt = (PRTHEAPOFFSETINTERNAL)((uintptr_t)pvMemory + off);
    }
    cbMemory &= ~(RTHEAPOFFSET_ALIGNMENT - 1);

    /* Init the heap anchor block. */
    pHeapInt->u32Magic = RTHEAPOFFSET_MAGIC;
    pHeapInt->cbHeap = (uint32_t)cbMemory;
    /* Everything except the anchor and the first block header is free. */
    pHeapInt->cbFree = (uint32_t)cbMemory
                     - sizeof(RTHEAPOFFSETBLOCK)
                     - sizeof(RTHEAPOFFSETINTERNAL);
    pHeapInt->offFreeTail = pHeapInt->offFreeHead = sizeof(*pHeapInt);
    for (i = 0; i < RT_ELEMENTS(pHeapInt->au32Alignment); i++)
        pHeapInt->au32Alignment[i] = UINT32_MAX;

    /* Init the single free block. */
    pFree = RTHEAPOFF_TO_PTR(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
    pFree->Core.offNext = 0;
    pFree->Core.offPrev = 0;
    pFree->Core.offSelf = pHeapInt->offFreeHead;
    pFree->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
    pFree->offNext = 0;
    pFree->offPrev = 0;
    pFree->cb = pHeapInt->cbFree;

    *phHeap = pHeapInt;

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif

    return VINF_SUCCESS;
}
RT_EXPORT_SYMBOL(RTHeapOffsetInit);
/**
 * Allocates memory from the offset-based heap.
 *
 * @returns Pointer to the allocated user data, NULL on failure.
 * @param   hHeap        The heap handle.
 * @param   cb           Requested size; rounded up to the heap granularity
 *                       and to the minimum block size.
 * @param   cbAlignment  Requested alignment (power of two); 0 or anything
 *                       smaller than the heap default means the default.
 */
RTDECL(void *) RTHeapOffsetAlloc(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
{
    PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
    PRTHEAPOFFSETBLOCK pBlock;

    /*
     * Validate and adjust the input.
     */
    AssertPtrReturn(pHeapInt, NULL);

    /* Enforce the minimum block size / block-granular rounding. */
    cb = cb < RTHEAPOFFSET_MIN_BLOCK ? RTHEAPOFFSET_MIN_BLOCK : RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);

    /* A caller-supplied alignment must be a power of two; it is raised to
       the heap default when smaller (0 selects the default directly). */
    if (cbAlignment)
    {
        Assert(!(cbAlignment & (cbAlignment - 1)));
        Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
    }
    if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
        cbAlignment = RTHEAPOFFSET_ALIGNMENT;

    /*
     * Do the allocation and hand out the user portion (after the header).
     */
    pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
    return RT_LIKELY(pBlock) ? (void *)(pBlock + 1) : NULL;
}
RT_EXPORT_SYMBOL(RTHeapOffsetAlloc);
/**
 * Allocates zero-initialized memory from the offset-based heap.
 *
 * Identical to RTHeapOffsetAlloc() except that the (rounded-up) user area
 * is cleared before being returned.
 *
 * @returns Pointer to the zeroed user data, NULL on failure.
 * @param   hHeap        The heap handle.
 * @param   cb           Requested size.
 * @param   cbAlignment  Requested alignment (power of two), 0 for default.
 */
RTDECL(void *) RTHeapOffsetAllocZ(RTHEAPOFFSET hHeap, size_t cb, size_t cbAlignment)
{
    PRTHEAPOFFSETINTERNAL pHeapInt = hHeap;
    PRTHEAPOFFSETBLOCK pBlock;
    void *pv = NULL;

    /*
     * Validate and adjust the input.
     */
    AssertPtrReturn(pHeapInt, NULL);

    cb = cb < RTHEAPOFFSET_MIN_BLOCK ? RTHEAPOFFSET_MIN_BLOCK : RT_ALIGN_Z(cb, RTHEAPOFFSET_ALIGNMENT);

    if (cbAlignment)
    {
        /* Must be a power of two. */
        Assert(!(cbAlignment & (cbAlignment - 1)));
        Assert((cbAlignment & ~(cbAlignment - 1)) == cbAlignment);
    }
    if (cbAlignment < RTHEAPOFFSET_ALIGNMENT)
        cbAlignment = RTHEAPOFFSET_ALIGNMENT;

    /*
     * Allocate and zero the user data.
     */
    pBlock = rtHeapOffsetAllocBlock(pHeapInt, cb, cbAlignment);
    if (RT_LIKELY(pBlock))
    {
        pv = pBlock + 1;
        memset(pv, 0, cb);
    }
    return pv;
}
RT_EXPORT_SYMBOL(RTHeapOffsetAllocZ);
/**
 * Allocates a block of memory from the specified heap.
 *
 * No parameter validation or adjustment is performed.
 *
 * @returns Pointer to the allocated block.
 * @returns NULL on failure.
 *
 * @param   pHeapInt    The heap.
 * @param   cb          Size of the memory block to allocate (already rounded
 *                      to RTHEAPOFFSET_ALIGNMENT by the public callers).
 * @param   uAlignment  The alignment specifications for the allocated block.
 */
static PRTHEAPOFFSETBLOCK rtHeapOffsetAllocBlock(PRTHEAPOFFSETINTERNAL pHeapInt, size_t cb, size_t uAlignment)
{
    PRTHEAPOFFSETBLOCK pRet = NULL;
    PRTHEAPOFFSETFREE pFree;

    AssertReturn((pHeapInt)->u32Magic == RTHEAPOFFSET_MAGIC, NULL);
#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif

    /*
     * Search for a fitting block from the lower end of the heap.
     */
    for (pFree = RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE);
         pFree;
         pFree = RTHEAPOFF_TO_PTR_N(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE))
    {
        uintptr_t offAlign;
        ASSERT_BLOCK_FREE(pHeapInt, pFree);

        /*
         * Match for size and alignment.
         */
        if (pFree->cb < cb)
            continue;

        offAlign = (uintptr_t)(&pFree->Core + 1) & (uAlignment - 1);
        if (offAlign)
        {
            PRTHEAPOFFSETFREE pPrev;

            /* Recompute the padding assuming one extra block header will be
               inserted in front of the aligned part. */
            offAlign = (uintptr_t)(&pFree[1].Core + 1) & (uAlignment - 1);
            offAlign = uAlignment - offAlign;
            if (pFree->cb < cb + offAlign + sizeof(RTHEAPOFFSETFREE))
                continue;

            /*
             * Split up the free block into two, so that the 2nd is aligned as
             * per specification.
             */
            pPrev = pFree;
            pFree = (PRTHEAPOFFSETFREE)((uintptr_t)(pFree + 1) + offAlign);
            pFree->Core.offPrev = pPrev->Core.offSelf;
            pFree->Core.offNext = pPrev->Core.offNext;
            pFree->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            pFree->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pFree->offPrev = pPrev->Core.offSelf;
            pFree->offNext = pPrev->offNext;
            pFree->cb = (pFree->Core.offNext ? pFree->Core.offNext : pHeapInt->cbHeap)
                      - pFree->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);

            pPrev->Core.offNext = pFree->Core.offSelf;
            pPrev->offNext = pFree->Core.offSelf;
            pPrev->cb = pFree->Core.offSelf - pPrev->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);

            /* Fix the back link of the next PHYSICAL block. */
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pFree->Core.offSelf;
            /* Fix the back link of the next FREE block.  BUGFIX: the
               original resolved pFree->Core.offNext here (the next physical
               block, normally a USED one), which wrote the free-list offPrev
               field into that block's user data and corrupted it; the
               free-list successor is pFree->offNext. */
            if (pFree->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->Core.offSelf;
            else
                pHeapInt->offFreeTail = pFree->Core.offSelf;

            /* One more header now takes up heap space. */
            pHeapInt->cbFree -= sizeof(RTHEAPOFFSETBLOCK);
            ASSERT_BLOCK_FREE(pHeapInt, pPrev);
            ASSERT_BLOCK_FREE(pHeapInt, pFree);
        }

        /*
         * Split off a new FREE block?
         */
        if (pFree->cb >= cb + RT_ALIGN_Z(sizeof(RTHEAPOFFSETFREE), RTHEAPOFFSET_ALIGNMENT))
        {
            /*
             * Create a new FREE block at then end of this one.
             */
            PRTHEAPOFFSETFREE pNew = (PRTHEAPOFFSETFREE)((uintptr_t)&pFree->Core + cb + sizeof(RTHEAPOFFSETBLOCK));

            pNew->Core.offSelf = RTHEAPOFF_TO_OFF(pHeapInt, pNew);
            pNew->Core.offNext = pFree->Core.offNext;
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = pNew->Core.offSelf;
            pNew->Core.offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            pNew->Core.fFlags = RTHEAPOFFSETBLOCK_FLAGS_MAGIC | RTHEAPOFFSETBLOCK_FLAGS_FREE;

            /* Replace pFree by pNew in the free list. */
            pNew->offNext = pFree->offNext;
            if (pNew->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pNew->offNext, PRTHEAPOFFSETFREE)->offPrev = pNew->Core.offSelf;
            else
                pHeapInt->offFreeTail = pNew->Core.offSelf;
            pNew->offPrev = pFree->offPrev;
            if (pNew->offPrev)
                RTHEAPOFF_TO_PTR(pHeapInt, pNew->offPrev, PRTHEAPOFFSETFREE)->offNext = pNew->Core.offSelf;
            else
                pHeapInt->offFreeHead = pNew->Core.offSelf;
            pNew->cb = (pNew->Core.offNext ? pNew->Core.offNext : pHeapInt->cbHeap)
                     - pNew->Core.offSelf - sizeof(RTHEAPOFFSETBLOCK);
            ASSERT_BLOCK_FREE(pHeapInt, pNew);

            /*
             * Adjust and convert the old FREE node into a USED node.
             */
            pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pFree->Core.offNext = pNew->Core.offSelf;
            pHeapInt->cbFree -= pFree->cb;
            pHeapInt->cbFree += pNew->cb;
            pRet = &pFree->Core;
            ASSERT_BLOCK_USED(pHeapInt, pRet);
        }
        else
        {
            /*
             * Link it out of the free list.
             */
            if (pFree->offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offNext, PRTHEAPOFFSETFREE)->offPrev = pFree->offPrev;
            else
                pHeapInt->offFreeTail = pFree->offPrev;
            if (pFree->offPrev)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->offPrev, PRTHEAPOFFSETFREE)->offNext = pFree->offNext;
            else
                pHeapInt->offFreeHead = pFree->offNext;

            /*
             * Convert it to a used block.
             */
            pHeapInt->cbFree -= pFree->cb;
            pFree->Core.fFlags &= ~RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pRet = &pFree->Core;
            ASSERT_BLOCK_USED(pHeapInt, pRet);
        }
        break;
    }

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif
    return pRet;
}
/**
 * Frees a block previously allocated from the offset-based heap.
 *
 * @param   hHeap   Heap handle; only used for an assertion, the heap is
 *                  located from the block's own offSelf field.
 * @param   pv      Pointer returned by RTHeapOffsetAlloc/AllocZ, or NULL
 *                  (which is a no-op).
 */
RTDECL(void) RTHeapOffsetFree(RTHEAPOFFSET hHeap, void *pv)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;
    PRTHEAPOFFSETBLOCK pBlock;

    /*
     * Validate input.
     */
    if (!pv)
        return;
    AssertPtr(pv);
    Assert(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv);

    /*
     * Get the block and heap. If in strict mode, validate these.
     */
    pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
    pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
    ASSERT_BLOCK_USED(pHeapInt, pBlock);
    ASSERT_ANCHOR(pHeapInt);
    Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);

#ifdef RTHEAPOFFSET_FREE_POISON
    /*
     * Poison the block.
     *
     * BUGFIX: the original referenced pBlock->pNext and pHeapInt->pvEnd,
     * which are fields of the pointer-based RTHeapSimple structures and do
     * not exist in the offset-based ones, so this section could never
     * compile when RTHEAPOFFSET_FREE_POISON was enabled.  Use the offset
     * fields instead.
     */
    const size_t cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
                         - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
    memset(pBlock + 1, RTHEAPOFFSET_FREE_POISON, cbBlock);
#endif

    /*
     * Call worker which does the actual job.
     */
    rtHeapOffsetFreeBlock(pHeapInt, pBlock);
}
RT_EXPORT_SYMBOL(RTHeapOffsetFree);
/**
 * Free a memory block.
 *
 * Inserts the block into the address-sorted free list and coalesces it with
 * physically adjacent free neighbours on both sides.
 *
 * @param   pHeapInt    The heap.
 * @param   pBlock      The memory block to free.
 */
static void rtHeapOffsetFreeBlock(PRTHEAPOFFSETINTERNAL pHeapInt, PRTHEAPOFFSETBLOCK pBlock)
{
    PRTHEAPOFFSETFREE pFree = (PRTHEAPOFFSETFREE)pBlock;
    PRTHEAPOFFSETFREE pLeft;
    PRTHEAPOFFSETFREE pRight;

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif

    /*
     * Look for the closest free list blocks by walking the blocks right
     * of us (both lists are sorted by address).
     */
    pLeft = NULL;
    pRight = NULL;
    if (pHeapInt->offFreeTail)
    {
        /* Walk physically rightwards until the first free block (or none). */
        pRight = RTHEAPOFF_TO_PTR_N(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETFREE);
        while (pRight && !RTHEAPOFFSETBLOCK_IS_FREE(&pRight->Core))
        {
            ASSERT_BLOCK(pHeapInt, &pRight->Core);
            pRight = RTHEAPOFF_TO_PTR_N(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETFREE);
        }
        /* No free block to the right: our left neighbour is the list tail;
           otherwise it is pRight's free-list predecessor. */
        if (!pRight)
            pLeft = RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeTail, PRTHEAPOFFSETFREE);
        else
        {
            ASSERT_BLOCK_FREE(pHeapInt, pRight);
            pLeft = RTHEAPOFF_TO_PTR_N(pHeapInt, pRight->offPrev, PRTHEAPOFFSETFREE);
        }
        if (pLeft)
            ASSERT_BLOCK_FREE(pHeapInt, pLeft);
    }
    /* pLeft == pFree means the block is already on the free list. */
    AssertMsgReturnVoid(pLeft != pFree, ("Freed twice! pv=%p (pBlock=%p)\n", pBlock + 1, pBlock));
    ASSERT_L(RTHEAPOFF_TO_OFF(pHeapInt, pLeft), RTHEAPOFF_TO_OFF(pHeapInt, pFree));
    Assert(!pRight || (uintptr_t)pRight > (uintptr_t)pFree);
    Assert(!pLeft || RTHEAPOFF_TO_PTR_N(pHeapInt, pLeft->offNext, PRTHEAPOFFSETFREE) == pRight);

    /*
     * Insert at the head of the free block list?
     */
    if (!pLeft)
    {
        Assert(pRight == RTHEAPOFF_TO_PTR_N(pHeapInt, pHeapInt->offFreeHead, PRTHEAPOFFSETFREE));
        pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
        pFree->offPrev = 0;
        pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
        if (pRight)
            pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        else
            pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        pHeapInt->offFreeHead = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
    }
    else
    {
        /*
         * Can we merge with left hand free block?
         */
        if (pLeft->Core.offNext == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
        {
            /* Physically adjacent: absorb pFree into pLeft.  pLeft's old
               size is subtracted here and the merged size re-added below. */
            pLeft->Core.offNext = pFree->Core.offNext;
            if (pFree->Core.offNext)
                RTHEAPOFF_TO_PTR(pHeapInt, pFree->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
            pHeapInt->cbFree -= pLeft->cb;
            pFree = pLeft;
        }
        /*
         * No, just link it into the free list then.
         */
        else
        {
            pFree->Core.fFlags |= RTHEAPOFFSETBLOCK_FLAGS_FREE;
            pFree->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pRight);
            pFree->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pLeft);
            pLeft->offNext = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            if (pRight)
                pRight->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
            else
                pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        }
    }

    /*
     * Can we merge with right hand free block?
     */
    if (    pRight
        &&  pRight->Core.offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pFree))
    {
        /* core */
        pFree->Core.offNext = pRight->Core.offNext;
        if (pRight->Core.offNext)
            RTHEAPOFF_TO_PTR(pHeapInt, pRight->Core.offNext, PRTHEAPOFFSETBLOCK)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        /* free */
        pFree->offNext = pRight->offNext;
        if (pRight->offNext)
            RTHEAPOFF_TO_PTR(pHeapInt, pRight->offNext, PRTHEAPOFFSETFREE)->offPrev = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        else
            pHeapInt->offFreeTail = RTHEAPOFF_TO_OFF(pHeapInt, pFree);
        pHeapInt->cbFree -= pRight->cb;
    }

    /*
     * Calculate the size and update free stats.
     */
    pFree->cb = (pFree->Core.offNext ? pFree->Core.offNext : pHeapInt->cbHeap)
              - RTHEAPOFF_TO_OFF(pHeapInt, pFree) - sizeof(RTHEAPOFFSETBLOCK);
    pHeapInt->cbFree += pFree->cb;
    ASSERT_BLOCK_FREE(pHeapInt, pFree);

#ifdef RTHEAPOFFSET_STRICT
    rtHeapOffsetAssertAll(pHeapInt);
#endif
}
#ifdef RTHEAPOFFSET_STRICT
/**
 * Internal consistency check (relying on assertions).
 *
 * Walks every block in physical order, validating each one and verifying
 * that the free-list links agree with the walk order.
 *
 * @param   pHeapInt    The heap anchor block.
 */
static void rtHeapOffsetAssertAll(PRTHEAPOFFSETINTERNAL pHeapInt)
{
    PRTHEAPOFFSETFREE pPrev = NULL;
    PRTHEAPOFFSETFREE pPrevFree = NULL;
    PRTHEAPOFFSETFREE pBlock;
    for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
         pBlock;
         pBlock = RTHEAPOFF_TO_PTR_N(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
    {
        if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
        {
            ASSERT_BLOCK_FREE(pHeapInt, pBlock);
            /* The free-list back link must point to the last free block
               seen; the first free block must be the list head. */
            Assert(pBlock->offPrev == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
            Assert(pPrevFree || pHeapInt->offFreeHead == RTHEAPOFF_TO_OFF(pHeapInt, pBlock));
            pPrevFree = pBlock;
        }
        else
            ASSERT_BLOCK_USED(pHeapInt, &pBlock->Core);
        /* Physical back link must point to the previous block. */
        Assert(!pPrev || RTHEAPOFF_TO_OFF(pHeapInt, pPrev) == pBlock->Core.offPrev);
        pPrev = pBlock;
    }
    /* The last free block seen must be the free-list tail. */
    Assert(pHeapInt->offFreeTail == RTHEAPOFF_TO_OFF(pHeapInt, pPrevFree));
}
#endif
/**
 * Returns the actual (rounded-up) size of an allocated block.
 *
 * @returns Size of the user area in bytes, 0 for NULL or bad input.
 * @param   hHeap   Heap handle; only used for an assertion.
 * @param   pv      Pointer returned by RTHeapOffsetAlloc/AllocZ, or NULL.
 */
RTDECL(size_t) RTHeapOffsetSize(RTHEAPOFFSET hHeap, void *pv)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;
    PRTHEAPOFFSETBLOCK pBlock;
    size_t cbBlock;

    /*
     * NULL yields zero; anything else must be a properly aligned pointer.
     */
    if (!pv)
        return 0;
    AssertPtrReturn(pv, 0);
    AssertReturn(RT_ALIGN_P(pv, RTHEAPOFFSET_ALIGNMENT) == pv, 0);

    /*
     * Recover the block header and the heap anchor, validating in strict mode.
     */
    pBlock = (PRTHEAPOFFSETBLOCK)pv - 1;
    pHeapInt = RTHEAPOFF_GET_ANCHOR(pBlock);
    ASSERT_BLOCK_USED(pHeapInt, pBlock);
    ASSERT_ANCHOR(pHeapInt);
    Assert(pHeapInt == (PRTHEAPOFFSETINTERNAL)hHeap || !hHeap);

    /*
     * The user size is the distance to the next physical block (or the heap
     * end) minus the block header.
     */
    cbBlock = (pBlock->offNext ? pBlock->offNext : pHeapInt->cbHeap)
            - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
    return cbBlock;
}
RT_EXPORT_SYMBOL(RTHeapOffsetSize);
/**
 * Returns the total size of the heap, anchor structure included.
 *
 * @returns Heap size in bytes; 0 for a NIL handle.
 * @param   hHeap   The heap handle.
 */
RTDECL(size_t) RTHeapOffsetGetHeapSize(RTHEAPOFFSET hHeap)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;

    /* A NIL handle is a valid "no heap" and reports zero. */
    if (hHeap == NIL_RTHEAPOFFSET)
        return 0;

    pHeapInt = hHeap;
    AssertPtrReturn(pHeapInt, 0);
    ASSERT_ANCHOR(pHeapInt);
    return pHeapInt->cbHeap;
}
RT_EXPORT_SYMBOL(RTHeapOffsetGetHeapSize);
/**
 * Returns the amount of memory currently free in the heap.
 *
 * @returns Free bytes; 0 for a NIL handle.
 * @param   hHeap   The heap handle.
 */
RTDECL(size_t) RTHeapOffsetGetFreeSize(RTHEAPOFFSET hHeap)
{
    PRTHEAPOFFSETINTERNAL pHeapInt;

    /* A NIL handle is a valid "no heap" and reports zero. */
    if (hHeap == NIL_RTHEAPOFFSET)
        return 0;

    pHeapInt = hHeap;
    AssertPtrReturn(pHeapInt, 0);
    ASSERT_ANCHOR(pHeapInt);
    return pHeapInt->cbFree;
}
RT_EXPORT_SYMBOL(RTHeapOffsetGetFreeSize);
/**
 * Dumps every block in the heap through the supplied printf-style callback.
 *
 * @param   hHeap       Handle to the heap to dump.
 * @param   pfnPrintf   Callback receiving the formatted dump lines.
 */
RTDECL(void) RTHeapOffsetDump(RTHEAPOFFSET hHeap, PFNRTHEAPOFFSETPRINTF pfnPrintf)
{
    PRTHEAPOFFSETINTERNAL pHeapInt = (PRTHEAPOFFSETINTERNAL)hHeap;
    PRTHEAPOFFSETFREE pBlock;

    pfnPrintf("**** Dumping Heap %p - cbHeap=%x cbFree=%x ****\n",
              hHeap, pHeapInt->cbHeap, pHeapInt->cbFree);

    for (pBlock = (PRTHEAPOFFSETFREE)(pHeapInt + 1);
         pBlock;
         pBlock = RTHEAPOFF_TO_PTR_N(pHeapInt, pBlock->Core.offNext, PRTHEAPOFFSETFREE))
    {
        /* User size = distance to the next physical block minus the header.
           BUGFIX: the guard must test Core.offNext — the original tested
           pBlock->offNext, which is the free-list link and overlays user
           data for USED blocks, producing bogus sizes in the dump. */
        size_t cb = (pBlock->Core.offNext ? pBlock->Core.offNext : pHeapInt->cbHeap)
                  - RTHEAPOFF_TO_OFF(pHeapInt, pBlock) - sizeof(RTHEAPOFFSETBLOCK);
        if (RTHEAPOFFSETBLOCK_IS_FREE(&pBlock->Core))
            pfnPrintf("%p %06x FREE offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x : cb=%#06x offNext=%06x offPrev=%06x\n",
                      pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb,
                      pBlock->cb, pBlock->offNext, pBlock->offPrev);
        else
            pfnPrintf("%p %06x USED offNext=%06x offPrev=%06x fFlags=%#x cb=%#06x\n",
                      pBlock, pBlock->Core.offSelf, pBlock->Core.offNext, pBlock->Core.offPrev, pBlock->Core.fFlags, cb);
    }
    pfnPrintf("**** Done dumping Heap %p ****\n", hHeap);
}
RT_EXPORT_SYMBOL(RTHeapOffsetDump);
../r0drv
\ No newline at end of file
/* $Id: vbox_drv.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_drv.c
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include "vbox_drv.h"
#include <VBox/VBoxGuest.h>
#include <linux/module.h>
#include <linux/console.h>
#include <linux/vt_kern.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
#include "version-generated.h"
/* Module parameter: -1 = auto (default), 0 = never load, 1 = force load. */
int vbox_modeset = -1;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, vbox_modeset, int, 0400);
static struct drm_driver driver;
static DEFINE_PCI_DEVICE_TABLE(pciidlist) =
{
{0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
{0, 0, 0},
};
MODULE_DEVICE_TABLE(pci, pciidlist);
/* PCI probe callback: let the DRM core create and register the device. */
static int vbox_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
    int rc = drm_get_pci_dev(pdev, ent, &driver);
    return rc;
}
/* PCI remove callback: tear down the DRM device registered by probe. */
static void vbox_pci_remove(struct pci_dev *pdev)
{
    drm_put_dev(pci_get_drvdata(pdev));
}
/* Common suspend path: stop connector polling, save PCI state and suspend
 * the fbdev emulation (done under the console lock). Always returns 0. */
static int vbox_drm_freeze(struct drm_device *dev)
{
    drm_kms_helper_poll_disable(dev);
    pci_save_state(dev->pdev);
    console_lock();
    vbox_fbdev_set_suspend(dev, 1);
    console_unlock();
    return 0;
}
/* Common resume path: force a full mode restore and wake the fbdev
 * emulation back up. Always returns 0. */
static int vbox_drm_thaw(struct drm_device *dev)
{
    drm_mode_config_reset(dev);
    drm_helper_resume_force_mode(dev);
    console_lock();
    vbox_fbdev_set_suspend(dev, 0);
    console_unlock();
    return 0;
}
/* Resume from system suspend: re-enable the PCI device, restore the mode
 * and restart connector polling. */
static int vbox_drm_resume(struct drm_device *dev)
{
    int rc;

    if (pci_enable_device(dev->pdev))
        return -EIO;
    rc = vbox_drm_thaw(dev);
    if (rc)
        return rc;
    drm_kms_helper_poll_enable(dev);
    return 0;
}
/* dev_pm_ops .suspend: freeze KMS state, then power the adapter down. */
static int vbox_pm_suspend(struct device *dev)
{
    struct pci_dev *pdev = to_pci_dev(dev);
    int rc = vbox_drm_freeze(pci_get_drvdata(pdev));

    if (rc)
        return rc;
    pci_disable_device(pdev);
    pci_set_power_state(pdev, PCI_D3hot);
    return 0;
}
/* dev_pm_ops .resume and .restore hook. */
static int vbox_pm_resume(struct device *dev)
{
    return vbox_drm_resume(pci_get_drvdata(to_pci_dev(dev)));
}
/* dev_pm_ops .freeze (hibernation): may be called before the driver has
 * finished loading, hence the extra checks. */
static int vbox_pm_freeze(struct device *dev)
{
    struct drm_device *ddev = pci_get_drvdata(to_pci_dev(dev));

    if (!ddev || !ddev->dev_private)
        return -ENODEV;
    return vbox_drm_freeze(ddev);
}
/* dev_pm_ops .thaw (hibernation image written, resume normal operation). */
static int vbox_pm_thaw(struct device *dev)
{
    return vbox_drm_thaw(pci_get_drvdata(to_pci_dev(dev)));
}
/* dev_pm_ops .poweroff: final freeze before hibernation power-off. */
static int vbox_pm_poweroff(struct device *dev)
{
    return vbox_drm_freeze(pci_get_drvdata(to_pci_dev(dev)));
}
/* Power-management callbacks; .restore reuses the resume path. */
static const struct dev_pm_ops vbox_pm_ops = {
    .suspend = vbox_pm_suspend,
    .resume = vbox_pm_resume,
    .freeze = vbox_pm_freeze,
    .thaw = vbox_pm_thaw,
    .poweroff = vbox_pm_poweroff,
    .restore = vbox_pm_resume,
};

/* PCI driver glue registered from vbox_init(). */
static struct pci_driver vbox_pci_driver =
{
    .name = DRIVER_NAME,
    .id_table = pciidlist,
    .probe = vbox_pci_probe,
    .remove = vbox_pci_remove,
    .driver.pm = &vbox_pm_ops,
};
/* File operations: everything is delegated to the DRM core except mmap,
 * which must go through our TTM-aware vbox_mmap(). */
static const struct file_operations vbox_fops =
{
    .owner = THIS_MODULE,
    .open = drm_open,
    .release = drm_release,
    .unlocked_ioctl = drm_ioctl,
    .mmap = vbox_mmap,
    .poll = drm_poll,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
    .fasync = drm_fasync,
#endif
#ifdef CONFIG_COMPAT
    .compat_ioctl = drm_compat_ioctl,
#endif
    .read = drm_read,
};
/* A new DRM master took over: restart the cycle used to detect whether
 * user space supports hot-plug, and switch acceleration off until the new
 * client has queried the modes. */
static int vbox_master_set(struct drm_device *dev,
                           struct drm_file *file_priv,
                           bool from_open)
{
    struct vbox_private *priv = dev->dev_private;

    priv->initial_mode_queried = false;
    vbox_disable_accel(priv);
    return 0;
}
/* The DRM master went away: same reset as vbox_master_set(). */
static void vbox_master_drop(struct drm_device *dev,
                             struct drm_file *file_priv,
                             bool from_release)
{
    struct vbox_private *priv = dev->dev_private;

    priv->initial_mode_queried = false;
    vbox_disable_accel(priv);
}
/* DRM driver description; vbox_driver_load()/unload() (defined in
 * vbox_main.c) do the real device setup and teardown. */
static struct drm_driver driver =
{
    .driver_features = DRIVER_MODESET | DRIVER_GEM | DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
    .dev_priv_size = 0,
    .load = vbox_driver_load,
    .unload = vbox_driver_unload,
    .lastclose = vbox_driver_lastclose,
    .master_set = vbox_master_set,
    .master_drop = vbox_master_drop,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
    .set_busid = drm_pci_set_busid,
#endif
    .fops = &vbox_fops,
    .irq_handler = vbox_irq_handler,
    .name = DRIVER_NAME,
    .desc = DRIVER_DESC,
    .date = DRIVER_DATE,
    .major = DRIVER_MAJOR,
    .minor = DRIVER_MINOR,
    .patchlevel = DRIVER_PATCHLEVEL,
    .gem_free_object = vbox_gem_free_object,
    .dumb_create = vbox_dumb_create,
    .dumb_map_offset = vbox_dumb_mmap_offset,
    /* The generic dumb_destroy helper only exists from 3.12 on. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
    .dumb_destroy = vbox_dumb_destroy,
#else
    .dumb_destroy = drm_gem_dumb_destroy,
#endif
};
/**
 * Module entry point: refuse to load when modesetting was disabled or a
 * console is already in graphics mode, otherwise register the PCI driver.
 */
static int __init vbox_init(void)
{
    unsigned i;
#ifdef CONFIG_VGA_CONSOLE
    /* Honour "nomodeset" on the kernel command line unless the user forced
     * us on with modeset=1. */
    if (vgacon_text_force() && vbox_modeset == -1)
        return -EINVAL;
#endif
    if (vbox_modeset == 0)
        return -EINVAL;
    /* Do not load if any of the virtual consoles is in graphics mode to be
     * sure that we do not pick a fight with a user-mode driver or VESA. */
    for (i = 0; i < MAX_NR_CONSOLES - 1; ++i)
        if (vc_cons[i].d && vc_cons[i].d->vc_mode == KD_GRAPHICS)
            return -EINVAL;
    return drm_pci_init(&driver, &vbox_pci_driver);
}
/* Module exit: unregister the PCI/DRM driver. */
static void __exit vbox_exit(void)
{
    drm_pci_exit(&driver, &vbox_pci_driver);
}
module_init(vbox_init);
module_exit(vbox_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
/* "additional rights" covers the MIT-licensed code this driver is based on. */
MODULE_LICENSE("GPL and additional rights");
#ifdef MODULE_VERSION
MODULE_VERSION(VBOX_VERSION_STRING);
#endif
/* $Id: vbox_drv.h $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_drv.h
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#ifndef __VBOX_DRV_H__
#define __VBOX_DRV_H__
#define LOG_GROUP LOG_GROUP_DEV_VGA
#include "the-linux-kernel.h"
#include <VBox/VBoxVideoGuest.h>
#include <VBox/log.h>
#include <drm/drmP.h>
#include <drm/drm_fb_helper.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
#include <drm/ttm/ttm_memory.h>
#include <drm/ttm/ttm_module.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
# include <drm/drm_gem.h>
#endif
/* #include "vboxvideo.h" */
#include "product-generated.h"
/* Identity strings and version numbers reported to the DRM core. */
#define DRIVER_AUTHOR VBOX_VENDOR
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC VBOX_PRODUCT " Graphics Card"
#define DRIVER_DATE "20130823"
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
/* Maximum mouse pointer dimensions supported for upload to the host. */
#define VBOX_MAX_CURSOR_WIDTH 64
#define VBOX_MAX_CURSOR_HEIGHT 64
/* Parenthesised so the macros expand safely inside larger expressions
 * (the originals were bare `a * b` / `a * 4 + a / 8` expressions). */
#define CURSOR_PIXEL_COUNT (VBOX_MAX_CURSOR_WIDTH * VBOX_MAX_CURSOR_HEIGHT)
/* 32bpp colour data plus a 1bpp AND mask. */
#define CURSOR_DATA_SIZE (CURSOR_PIXEL_COUNT * 4 + CURSOR_PIXEL_COUNT / 8)
struct vbox_fbdev;

/** Per-device driver state, hung off drm_device::dev_private. */
struct vbox_private {
    struct drm_device *dev;
    /* Mapping of the adapter's VRAM BAR. */
    uint8_t __iomem *vram;
    /* Context used when submitting HGSMI commands to the host (see
     * VBoxHGSMIGetModeHints()/VBoxHGSMIProcessDisplayInfo() callers). */
    HGSMIGUESTCOMMANDCONTEXT submit_info;
    struct VBVABUFFERCONTEXT *vbva_info;
    bool any_pitch;
    unsigned num_crtcs;
    bool vga2_clone;
    /** Amount of available VRAM, including space used for buffers. */
    uint32_t full_vram_size;
    /** Amount of available VRAM, not including space used for buffers. */
    uint32_t vram_size;
    /** Offset to the host flags in the VRAM (read by vbox_get_flags()). */
    uint32_t host_flags_offset;
    /** Array of structures for receiving mode hints. */
    VBVAMODEHINT *last_mode_hints;
    struct vbox_fbdev *fbdev;
    int fb_mtrr;
    /* TTM memory-manager state. */
    struct {
        struct drm_global_reference mem_global_ref;
        struct ttm_bo_global_ref bo_global_ref;
        struct ttm_bo_device bdev;
        bool mm_initialised;
    } ttm;
    /* NOTE(review): presumably serialises hardware access -- confirm at the
     * use sites (not visible in this file). */
    struct mutex hw_mutex;
    /* True once vbox_irq_init() has installed the interrupt handler. */
    bool isr_installed;
    /** We decide whether or not user-space supports display hot-plug
     * depending on whether they react to a hot-plug event after the initial
     * mode query. */
    bool initial_mode_queried;
    /* Deferred hot-plug processing, scheduled from the IRQ handler. */
    struct work_struct hotplug_work;
    uint32_t input_mapping_width;
    uint32_t input_mapping_height;
    /* Last cursor shape state. */
    uint32_t cursor_width;
    uint32_t cursor_height;
    uint32_t cursor_hot_x;
    uint32_t cursor_hot_y;
    size_t cursor_data_size;
    uint8_t cursor_data[CURSOR_DATA_SIZE];
};
/* The CURSOR_* helpers are only needed for the struct definition above. */
#undef CURSOR_PIXEL_COUNT
#undef CURSOR_DATA_SIZE
int vbox_driver_load(struct drm_device *dev, unsigned long flags);
int vbox_driver_unload(struct drm_device *dev);
void vbox_driver_lastclose(struct drm_device *dev);
struct vbox_gem_object;
/* I/O ports used for HGSMI host/guest communication. */
#ifndef VGA_PORT_HGSMI_HOST
# define VGA_PORT_HGSMI_HOST 0x3b0
# define VGA_PORT_HGSMI_GUEST 0x3d0
#endif
/** Connector state, including the most recent mode hint from the host. */
struct vbox_connector {
    struct drm_connector base;
    char name[32];
    /* The CRTC associated with this connector. */
    struct vbox_crtc *vbox_crtc;
    /* Size and connection state last reported via VBoxHGSMIGetModeHints(). */
    struct {
        uint16_t width;
        uint16_t height;
        bool disconnected;
    } mode_hint;
};
/** Per-CRTC state. */
struct vbox_crtc {
    struct drm_crtc base;
    bool blanked;
    /* Mirrors the connector's hint; updated in vbox_update_mode_hints(). */
    bool disconnected;
    unsigned crtc_id;
    uint32_t fb_offset;
    bool cursor_enabled;
};
/** Trivial encoder wrapper; no extra state needed. */
struct vbox_encoder {
    struct drm_encoder base;
};
/** A DRM framebuffer backed by a single GEM object. */
struct vbox_framebuffer {
    struct drm_framebuffer base;
    struct drm_gem_object *obj;
};
/** fbdev emulation state; sysram is the system-memory shadow buffer which
 * vbox_dirty_update() copies into the VRAM-backed object. */
struct vbox_fbdev {
    struct drm_fb_helper helper;
    struct vbox_framebuffer afb;
    void *sysram;
    int size;
    struct ttm_bo_kmap_obj mapping;
    int x1, y1, x2, y2; /* dirty rect */
    spinlock_t dirty_lock;
};
/* Upcasts from the embedded DRM structures to our wrapper structures. */
#define to_vbox_crtc(x) container_of(x, struct vbox_crtc, base)
#define to_vbox_connector(x) container_of(x, struct vbox_connector, base)
#define to_vbox_encoder(x) container_of(x, struct vbox_encoder, base)
#define to_vbox_framebuffer(x) container_of(x, struct vbox_framebuffer, base)
extern int vbox_mode_init(struct drm_device *dev);
extern void vbox_mode_fini(struct drm_device *dev);
/* Compatibility wrappers for kernel API changes. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
# define DRM_MODE_FB_CMD drm_mode_fb_cmd
#else
# define DRM_MODE_FB_CMD drm_mode_fb_cmd2
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
# define CRTC_FB(crtc) (crtc)->fb
#else
# define CRTC_FB(crtc) (crtc)->primary->fb
#endif
void vbox_enable_accel(struct vbox_private *vbox);
void vbox_disable_accel(struct vbox_private *vbox);
void vbox_enable_caps(struct vbox_private *vbox);
/* Report dirty regions of a framebuffer to the host. */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects);
/* mode_cmd became const in the 4.5 framebuffer API. */
int vbox_framebuffer_init(struct drm_device *dev,
                          struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                          const
#endif
                          struct DRM_MODE_FB_CMD *mode_cmd,
                          struct drm_gem_object *obj);
int vbox_fbdev_init(struct drm_device *dev);
void vbox_fbdev_fini(struct drm_device *dev);
void vbox_fbdev_set_suspend(struct drm_device *dev, int state);
/** A TTM buffer object together with its GEM wrapper and placement data. */
struct vbox_bo {
    struct ttm_buffer_object bo;
    struct ttm_placement placement;
    struct ttm_bo_kmap_obj kmap;
    struct drm_gem_object gem;
    /* The placement array type changed in kernel 3.18. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
    u32 placements[3];
#else
    struct ttm_place placements[3];
#endif
    int pin_count;
};
#define gem_to_vbox_bo(gobj) container_of((gobj), struct vbox_bo, gem)

/* Upcast from the embedded TTM buffer object. */
static inline struct vbox_bo *
vbox_bo(struct ttm_buffer_object *bo)
{
    return container_of(bo, struct vbox_bo, bo);
}
#define to_vbox_obj(x) container_of(x, struct vbox_gem_object, base)
/* Dumb-buffer support for unaccelerated user-space scanout buffers. */
extern int vbox_dumb_create(struct drm_file *file,
                            struct drm_device *dev,
                            struct drm_mode_create_dumb *args);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
extern int vbox_dumb_destroy(struct drm_file *file,
                             struct drm_device *dev,
                             uint32_t handle);
#endif
extern void vbox_gem_free_object(struct drm_gem_object *obj);
extern int vbox_dumb_mmap_offset(struct drm_file *file,
                                 struct drm_device *dev,
                                 uint32_t handle,
                                 uint64_t *offset);
/* Start of the mmap offset space handed out for GEM objects. */
#define DRM_FILE_PAGE_OFFSET (0x100000000ULL >> PAGE_SHIFT)
int vbox_mm_init(struct vbox_private *vbox);
void vbox_mm_fini(struct vbox_private *vbox);
int vbox_bo_create(struct drm_device *dev, int size, int align,
                   uint32_t flags, struct vbox_bo **pvboxbo);
int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel,
                    struct drm_gem_object **obj);
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr);
int vbox_bo_unpin(struct vbox_bo *bo);
/* Reserve @bo for exclusive use.  -ERESTARTSYS and -EBUSY are expected
 * outcomes and therefore not logged as errors. */
static inline int vbox_bo_reserve(struct vbox_bo *bo, bool no_wait)
{
    int rc = ttm_bo_reserve(&bo->bo, true, no_wait, false, 0);

    if (!rc)
        return 0;
    if (rc != -ERESTARTSYS && rc != -EBUSY)
        DRM_ERROR("reserve failed %p\n", bo);
    return rc;
}
/* Release a reservation taken with vbox_bo_reserve(). */
static inline void vbox_bo_unreserve(struct vbox_bo *bo)
{
    ttm_bo_unreserve(&bo->bo);
}
void vbox_ttm_placement(struct vbox_bo *bo, int domain);
int vbox_bo_push_sysram(struct vbox_bo *bo);
int vbox_mmap(struct file *filp, struct vm_area_struct *vma);
/* vbox_irq.c */
int vbox_irq_init(struct vbox_private *vbox);
void vbox_irq_fini(struct vbox_private *vbox);
void vbox_report_hotplug(struct vbox_private *vbox);
irqreturn_t vbox_irq_handler(int irq, void *arg);
#endif
/* $Id: vbox_dummy.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver, dummy driver for
* older kernels.
*/
/*
* Copyright (C) 2016 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
*/
#include <linux/module.h>
/* Dummy module built for kernels too old for the real driver: always
 * refuses to load. */
static int __init vbox_init(void)
{
    return -EINVAL;
}
static void __exit vbox_exit(void)
{
}
module_init(vbox_init);
module_exit(vbox_exit);
MODULE_LICENSE("GPL");
/* $Id: vbox_fb.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_fb.c
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
/* Include from most specific to most general to be able to override things. */
#include "vbox_drv.h"
#include <VBox/VBoxVideo.h>
#include <VBox/VMMDev.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/tty.h>
#include <linux/sysrq.h>
#include <linux/delay.h>
#include <linux/fb.h>
#include <linux/init.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
#include "vbox_drv.h"
/**
* Tell the host about dirty rectangles to update.
*/
/**
 * Copy a damaged region from the system-memory shadow buffer into the
 * VRAM-backed buffer object and report it to the host.  If the BO cannot
 * be reserved right now the damage is accumulated under dirty_lock and
 * flushed by a later call instead.
 */
static void vbox_dirty_update(struct vbox_fbdev *fbdev,
                              int x, int y, int width, int height)
{
    struct drm_device *dev = fbdev->helper.dev;
    int i;
    struct drm_gem_object *obj;
    struct vbox_bo *bo;
    int src_offset, dst_offset;
    /* Bytes per pixel, rounded up. */
    int bpp = (fbdev->afb.base.bits_per_pixel + 7)/8;
    int ret = -EBUSY;
    bool unmap = false;
    bool store_for_later = false;
    int x2, y2;
    unsigned long flags;
    struct drm_clip_rect rect;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    obj = fbdev->afb.obj;
    bo = gem_to_vbox_bo(obj);
    /*
     * try and reserve the BO, if we fail with busy
     * then the BO is being moved and we should
     * store up the damage until later.
     */
    if (drm_can_sleep())
        ret = vbox_bo_reserve(bo, true);
    if (ret) {
        if (ret != -EBUSY)
            return;
        store_for_later = true;
    }
    x2 = x + width - 1;
    y2 = y + height - 1;
    spin_lock_irqsave(&fbdev->dirty_lock, flags);
    /* Grow the rectangle to cover any damage stored by earlier calls. */
    if (fbdev->y1 < y)
        y = fbdev->y1;
    if (fbdev->y2 > y2)
        y2 = fbdev->y2;
    if (fbdev->x1 < x)
        x = fbdev->x1;
    if (fbdev->x2 > x2)
        x2 = fbdev->x2;
    if (store_for_later) {
        fbdev->x1 = x;
        fbdev->x2 = x2;
        fbdev->y1 = y;
        fbdev->y2 = y2;
        spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
        LogFunc(("vboxvideo: %d\n", __LINE__));
        return;
    }
    /* We flush everything now, so reset the stored damage to "empty". */
    fbdev->x1 = fbdev->y1 = INT_MAX;
    fbdev->x2 = fbdev->y2 = 0;
    spin_unlock_irqrestore(&fbdev->dirty_lock, flags);
    if (!bo->kmap.virtual) {
        ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
        if (ret) {
            DRM_ERROR("failed to kmap fb updates\n");
            vbox_bo_unreserve(bo);
            return;
        }
        unmap = true;
    }
    /* Copy the damaged rows from the shadow buffer into VRAM. */
    for (i = y; i <= y2; i++) {
        /* assume equal stride for now */
        src_offset = dst_offset = i * fbdev->afb.base.pitches[0] + (x * bpp);
        memcpy_toio(bo->kmap.virtual + src_offset, (char *)fbdev->sysram + src_offset, (x2 - x + 1) * bpp);
    }
    /* Not sure why the original code subtracted 1 here, but I will keep it that
     * way to avoid unnecessary differences. */
    rect.x1 = x;
    rect.x2 = x2 + 1;
    rect.y1 = y;
    rect.y2 = y2 + 1;
    vbox_framebuffer_dirty_rectangles(&fbdev->afb.base, &rect, 1);
    LogFunc(("vboxvideo: %d, bo->kmap.virtual=%p, fbdev->sysram=%p, x=%d, y=%d, x2=%d, y2=%d, unmap=%RTbool\n",
             __LINE__, bo->kmap.virtual, fbdev->sysram, (int)x, (int)y, (int)x2, (int)y2, unmap));
    if (unmap)
        ttm_bo_kunmap(&bo->kmap);
    vbox_bo_unreserve(bo);
}
/* fb_ops.fb_fillrect: draw into the shadow buffer, then push the touched
 * region to VRAM and the host. */
static void vbox_fillrect(struct fb_info *info,
                          const struct fb_fillrect *rect)
{
    struct vbox_fbdev *par = info->par;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    sys_fillrect(info, rect);
    vbox_dirty_update(par, rect->dx, rect->dy, rect->width, rect->height);
}
/* fb_ops.fb_copyarea: copy within the shadow buffer, then flush the
 * destination region. */
static void vbox_copyarea(struct fb_info *info,
                          const struct fb_copyarea *area)
{
    struct vbox_fbdev *par = info->par;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    sys_copyarea(info, area);
    vbox_dirty_update(par, area->dx, area->dy, area->width, area->height);
}
/* fb_ops.fb_imageblit: blit into the shadow buffer, then flush the
 * touched region. */
static void vbox_imageblit(struct fb_info *info,
                           const struct fb_image *image)
{
    struct vbox_fbdev *par = info->par;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    sys_imageblit(info, image);
    vbox_dirty_update(par, image->dx, image->dy, image->width, image->height);
}
/* fbdev operations: the sys_* drawing helpers are wrapped above so dirty
 * rectangles get tracked and flushed to the host. */
static struct fb_ops vboxfb_ops = {
    .owner = THIS_MODULE,
    .fb_check_var = drm_fb_helper_check_var,
    .fb_set_par = drm_fb_helper_set_par,
    .fb_fillrect = vbox_fillrect,
    .fb_copyarea = vbox_copyarea,
    .fb_imageblit = vbox_imageblit,
    .fb_pan_display = drm_fb_helper_pan_display,
    .fb_blank = drm_fb_helper_blank,
    .fb_setcmap = drm_fb_helper_setcmap,
    .fb_debug_enter = drm_fb_helper_debug_enter,
    .fb_debug_leave = drm_fb_helper_debug_leave,
};
/**
 * Create the GEM object backing the fbdev framebuffer.  The size is derived
 * from the pitch, not from width * bpp, so padded pitches are honoured.
 * Returns 0 and stores the object in *gobj_p, or a negative error code.
 */
static int vboxfb_create_object(struct vbox_fbdev *fbdev,
                                struct DRM_MODE_FB_CMD *mode_cmd,
                                struct drm_gem_object **gobj_p)
{
    struct drm_device *dev = fbdev->helper.dev;
    u32 bpp, depth;
    u32 size;
    struct drm_gem_object *gobj;
    /* Older kernels kept a single pitch field instead of a per-plane array. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
    __u32 pitch = mode_cmd->pitch;
#else
    __u32 pitch = mode_cmd->pitches[0];
#endif
    int ret = 0;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    /* bpp/depth are decoded here but not actually used below. */
    drm_fb_get_bpp_depth(mode_cmd->pixel_format, &depth, &bpp);
    size = pitch * mode_cmd->height;
    ret = vbox_gem_create(dev, size, true, &gobj);
    if (ret)
        return ret;
    *gobj_p = gobj;
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return ret;
}
/**
 * Build the fbdev framebuffer: a VRAM-backed GEM object plus a system
 * memory shadow buffer which vbox_dirty_update() copies to VRAM.
 * Returns 0 on success or a negative error code.
 */
static int vboxfb_create(struct drm_fb_helper *helper,
                         struct drm_fb_helper_surface_size *sizes)
{
    struct vbox_fbdev *fbdev =
        container_of(helper, struct vbox_fbdev, helper);
    struct drm_device *dev = fbdev->helper.dev;
    struct DRM_MODE_FB_CMD mode_cmd;
    struct drm_framebuffer *fb;
    struct fb_info *info;
    __u32 pitch;
    int size, ret;
    struct device *device = &dev->pdev->dev;
    void *sysram;
    struct drm_gem_object *gobj = NULL;
    struct vbox_bo *bo = NULL;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    mode_cmd.width = sizes->surface_width;
    mode_cmd.height = sizes->surface_height;
    pitch = mode_cmd.width * ((sizes->surface_bpp + 7) / 8);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
    mode_cmd.bpp = sizes->surface_bpp;
    mode_cmd.depth = sizes->surface_depth;
    mode_cmd.pitch = pitch;
#else
    mode_cmd.pixel_format = drm_mode_legacy_fb_format(sizes->surface_bpp,
                                                      sizes->surface_depth);
    mode_cmd.pitches[0] = pitch;
#endif
    size = pitch * mode_cmd.height;
    ret = vboxfb_create_object(fbdev, &mode_cmd, &gobj);
    if (ret) {
        DRM_ERROR("failed to create fbcon backing object %d\n", ret);
        return ret;
    }
    bo = gem_to_vbox_bo(gobj);
    sysram = vmalloc(size);
    if (!sysram) {
        /* Fix: do not leak the GEM object created above. */
        drm_gem_object_unreference_unlocked(gobj);
        return -ENOMEM;
    }
    info = framebuffer_alloc(0, device);
    if (!info) {
        /* Fix: release the shadow buffer and the GEM object on failure. */
        ret = -ENOMEM;
        goto err_free_sysram;
    }
    info->par = fbdev;
    ret = vbox_framebuffer_init(dev, &fbdev->afb, &mode_cmd, gobj);
    if (ret)
        goto out;
    fbdev->sysram = sysram;
    fbdev->size = size;
    fb = &fbdev->afb.base;
    fbdev->helper.fb = fb;
    fbdev->helper.fbdev = info;
    strcpy(info->fix.id, "vboxdrmfb");
    /* The last flag forces a mode set on VT switches even if the kernel does
     * not think it is needed. */
    info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT
                  | FBINFO_MISC_ALWAYS_SETPAR;
    info->fbops = &vboxfb_ops;
    ret = fb_alloc_cmap(&info->cmap, 256, 0);
    if (ret) {
        ret = -ENOMEM;
        goto out;
    }
    /* This seems to be done for safety checking that the framebuffer is not
     * registered twice by different drivers. */
    info->apertures = alloc_apertures(1);
    if (!info->apertures) {
        ret = -ENOMEM;
        goto out;
    }
    info->apertures->ranges[0].base = pci_resource_start(dev->pdev, 0);
    info->apertures->ranges[0].size = pci_resource_len(dev->pdev, 0);
    drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);
    drm_fb_helper_fill_var(info, &fbdev->helper, sizes->fb_width,
                           sizes->fb_height);
    info->screen_base = sysram;
    info->screen_size = size;
    info->pixmap.flags = FB_PIXMAP_SYSTEM;
    DRM_DEBUG_KMS("allocated %dx%d\n",
                  fb->width, fb->height);
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return 0;
err_free_sysram:
    vfree(sysram);
    drm_gem_object_unreference_unlocked(gobj);
    return ret;
out:
    /* NOTE(review): these late error paths still leak info/sysram/gobj as
     * the original code did; ownership of gobj after vbox_framebuffer_init()
     * is not visible from here, so they are deliberately left unchanged. */
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return ret;
}
/* Gamma setting is a deliberate no-op: not supported. */
static void vbox_fb_gamma_set(struct drm_crtc *crtc, u16 red, u16 green,
                              u16 blue, int regno)
{
    }
/* Report an identity ramp so readers see consistent values. */
static void vbox_fb_gamma_get(struct drm_crtc *crtc, u16 *red, u16 *green,
                              u16 *blue, int regno)
{
    *red = regno;
    *green = regno;
    *blue = regno;
}
/* fb helper hooks; vboxfb_create() builds the actual framebuffer. */
static struct drm_fb_helper_funcs vbox_fb_helper_funcs = {
    .gamma_set = vbox_fb_gamma_set,
    .gamma_get = vbox_fb_gamma_get,
    .fb_probe = vboxfb_create,
};
/**
 * Tear down everything vboxfb_create() set up: the registered fb_info, the
 * GEM reference held by the framebuffer, the helper, the shadow buffer and
 * finally the framebuffer object itself.
 */
static void vbox_fbdev_destroy(struct drm_device *dev,
                               struct vbox_fbdev *fbdev)
{
    struct fb_info *info;
    struct vbox_framebuffer *afb = &fbdev->afb;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    if (fbdev->helper.fbdev) {
        info = fbdev->helper.fbdev;
        unregister_framebuffer(info);
        if (info->cmap.len)
            fb_dealloc_cmap(&info->cmap);
        framebuffer_release(info);
    }
    if (afb->obj) {
        drm_gem_object_unreference_unlocked(afb->obj);
        afb->obj = NULL;
    }
    drm_fb_helper_fini(&fbdev->helper);
    vfree(fbdev->sysram);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
    drm_framebuffer_unregister_private(&afb->base);
#endif
    drm_framebuffer_cleanup(&afb->base);
    LogFunc(("vboxvideo: %d\n", __LINE__));
}
/**
 * Set up fbdev emulation: allocate the vbox_fbdev state, register the
 * fb helper for all CRTCs/connectors and force an initial configuration.
 * Returns 0 on success or a negative error code (state is freed on error).
 */
int vbox_fbdev_init(struct drm_device *dev)
{
    struct vbox_private *vbox = dev->dev_private;
    struct vbox_fbdev *fbdev;
    int ret;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    fbdev = kzalloc(sizeof(struct vbox_fbdev), GFP_KERNEL);
    if (!fbdev)
        return -ENOMEM;
    vbox->fbdev = fbdev;
    spin_lock_init(&fbdev->dirty_lock);
    /* The helper-funcs hookup API changed in kernel 3.17. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
    fbdev->helper.funcs = &vbox_fb_helper_funcs;
#else
    drm_fb_helper_prepare(dev, &fbdev->helper, &vbox_fb_helper_funcs);
#endif
    ret = drm_fb_helper_init(dev, &fbdev->helper, vbox->num_crtcs, vbox->num_crtcs);
    if (ret)
        goto free;
    ret = drm_fb_helper_single_add_all_connectors(&fbdev->helper);
    if (ret)
        goto fini;
    /* disable all the possible outputs/crtcs before entering KMS mode */
    drm_helper_disable_unused_functions(dev);
    /* 32 is the preferred bits-per-pixel for the initial framebuffer. */
    ret = drm_fb_helper_initial_config(&fbdev->helper, 32);
    if (ret)
        goto fini;
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return 0;
fini:
    drm_fb_helper_fini(&fbdev->helper);
free:
    kfree(fbdev);
    vbox->fbdev = NULL;
    LogFunc(("vboxvideo: %d, ret=%d\n", __LINE__, ret));
    return ret;
}
/* Free all fbdev emulation state. Safe to call when init never ran. */
void vbox_fbdev_fini(struct drm_device *dev)
{
    struct vbox_private *priv = dev->dev_private;
    struct vbox_fbdev *fbdev = priv->fbdev;

    if (!fbdev)
        return;
    LogFunc(("vboxvideo: %d\n", __LINE__));
    vbox_fbdev_destroy(dev, fbdev);
    kfree(fbdev);
    priv->fbdev = NULL;
}
/* Suspend (state=1) or resume (state=0) the fbdev emulation, if present. */
void vbox_fbdev_set_suspend(struct drm_device *dev, int state)
{
    struct vbox_private *priv = dev->dev_private;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    if (!priv->fbdev)
        return;
    fb_set_suspend(priv->fbdev->helper.fbdev, state);
}
/* $Id: vbox_irq.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2016 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* qxl_irq.c
* with the following copyright and permission notice:
*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Dave Airlie
* Alon Levy
*/
#include "vbox_drv.h"
#include <VBox/VBoxVideo.h>
#include <drm/drm_crtc_helper.h>
/* Acknowledge the interrupt by writing ~0 to the HGSMI host port. */
static void vbox_clear_irq(void)
{
    outl((uint32_t)~0, VGA_PORT_HGSMI_HOST);
}

/* Read the host flags dword located in VRAM. */
static uint32_t vbox_get_flags(struct vbox_private *vbox)
{
    return (uint32_t)readl(vbox->vram + vbox->host_flags_offset);
}

/* Defer hot-plug handling to process context via the work queue. */
void vbox_report_hotplug(struct vbox_private *vbox)
{
    schedule_work(&vbox->hotplug_work);
}
/**
 * Interrupt handler: check whether the host raised this interrupt (the
 * line is shared), schedule hot-plug processing when indicated, and
 * acknowledge it.
 */
irqreturn_t vbox_irq_handler(int irq, void *arg)
{
    struct drm_device *dev = (struct drm_device *) arg;
    struct vbox_private *vbox = (struct vbox_private *)dev->dev_private;
    uint32_t host_flags = vbox_get_flags(vbox);

    if (!(host_flags & HGSMIHOSTFLAGS_IRQ))
        return IRQ_NONE;
    /* Due to a bug in the initial host implementation of hot-plug interrupts,
     * the hot-plug and cursor capability flags were never cleared. Fortunately
     * we can tell when they would have been set by checking that the VSYNC flag
     * is not set. */
    if (   host_flags & (HGSMIHOSTFLAGS_HOTPLUG | HGSMIHOSTFLAGS_CURSOR_CAPABILITIES)
        && !(host_flags & HGSMIHOSTFLAGS_VSYNC))
        vbox_report_hotplug(vbox);
    vbox_clear_irq();
    return IRQ_HANDLED;
}
/**
 * Query the host for the latest video mode hints and bring the connectors
 * up to date with them.
 */
static void vbox_update_mode_hints(struct vbox_private *vbox)
{
    struct drm_device *dev = vbox->dev;
    struct drm_connector *connector;
    struct vbox_connector *vbox_connector;
    struct VBVAMODEHINT *hints;
    uint16_t flags;
    bool disconnected;
    unsigned crtc_id;
    int rc;

    rc = VBoxHGSMIGetModeHints(&vbox->submit_info, vbox->num_crtcs,
                               vbox->last_mode_hints);
    AssertMsgRCReturnVoid(rc, ("VBoxHGSMIGetModeHints failed, rc=%Rrc.\n", rc));
    /* drm_modeset_lock_all() only exists from kernel 3.9 on. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
    drm_modeset_lock_all(dev);
#else
    mutex_lock(&dev->mode_config.mutex);
#endif
    list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
        vbox_connector = to_vbox_connector(connector);
        hints = &vbox->last_mode_hints[vbox_connector->vbox_crtc->crtc_id];
        /* Only trust entries the host actually filled in. */
        if (hints->magic == VBVAMODEHINT_MAGIC) {
            LogFunc(("vboxvideo: %d: crtc_id=%u, mode %hdx%hd(enabled:%d),%hdx%hd\n",
                     __LINE__, (unsigned)vbox_connector->vbox_crtc->crtc_id,
                     (short)hints->cx, (short)hints->cy, (int)hints->fEnabled,
                     (short)hints->dx, (short)hints->dy));
            disconnected = !(hints->fEnabled);
            crtc_id = vbox_connector->vbox_crtc->crtc_id;
            flags = VBVA_SCREEN_F_ACTIVE
                    | (disconnected ? VBVA_SCREEN_F_DISABLED : VBVA_SCREEN_F_BLANK);
            /* NOTE(review): the 0x8fff masks below look like they were meant
             * to be 0x7fff (clear only the top bit) -- confirm against the
             * host mode-hint protocol. */
            vbox_connector->mode_hint.width = hints->cx & 0x8fff;
            vbox_connector->mode_hint.height = hints->cy & 0x8fff;
            vbox_connector->mode_hint.disconnected = disconnected;
            /* Only tell the host when the connection state changed. */
            if (vbox_connector->vbox_crtc->disconnected != disconnected) {
                VBoxHGSMIProcessDisplayInfo(&vbox->submit_info, crtc_id,
                                            0, 0, 0, hints->cx * 4, hints->cx,
                                            hints->cy, 0, flags);
                vbox_connector->vbox_crtc->disconnected = disconnected;
            }
            /* Suggested-position properties only exist from kernel 3.19. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
            /* 0xffff means "no position hint". */
            if ((hints->dx < 0xffff) && (hints->dy < 0xffff)) {
                drm_object_property_set_value(&connector->base,
                    dev->mode_config.suggested_x_property, hints->dx & 0x8fff);
                drm_object_property_set_value(&connector->base,
                    dev->mode_config.suggested_y_property, hints->dy & 0x8fff);
            }
#endif
        }
    }
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 9, 0)
    drm_modeset_unlock_all(dev);
#else
    mutex_unlock(&dev->mode_config.mutex);
#endif
}
/* Process-context half of hot-plug handling: refresh the mode hints and
 * send a hot-plug event to user space. */
static void vbox_hotplug_worker(struct work_struct *work)
{
    struct vbox_private *vbox = container_of(work, struct vbox_private,
                                             hotplug_work);

    LogFunc(("vboxvideo: %d: vbox=%p\n", __LINE__, vbox));
    vbox_update_mode_hints(vbox);
    drm_kms_helper_hotplug_event(vbox->dev);
}
/**
 * Install the interrupt handler and prime the initial mode hints.
 * Returns 0 on success, 1 on failure (callers only test for non-zero).
 */
int vbox_irq_init(struct vbox_private *vbox)
{
    int ret;

    LogFunc(("vboxvideo: %d: vbox=%p\n", __LINE__, vbox));
    vbox_update_mode_hints(vbox);
    /* Fix: the work item must be initialised before the interrupt can fire.
     * The line is shared, so vbox_irq_handler() may run and call
     * schedule_work() as soon as drm_irq_install() has registered it. */
    INIT_WORK(&vbox->hotplug_work, vbox_hotplug_worker);
    /* drm_irq_install() gained an irq parameter in kernel 3.16. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
    ret = drm_irq_install(vbox->dev, vbox->dev->pdev->irq);
#else
    ret = drm_irq_install(vbox->dev);
#endif
    if (unlikely(ret != 0)) {
        /* isr_installed is still false, so this is effectively a no-op,
         * kept for symmetry with the original code. */
        vbox_irq_fini(vbox);
        DRM_ERROR("Failed installing irq: %d\n", ret);
        return 1;
    }
    vbox->isr_installed = true;
    LogFunc(("vboxvideo: %d: vbox=%p\n", __LINE__, vbox));
    return 0;
}
/* Tear down interrupt handling: uninstall the handler and wait for any
 * pending hotplug work to finish.  Safe to call when no ISR is installed. */
void vbox_irq_fini(struct vbox_private *vbox)
{
    LogFunc(("vboxvideo: %d: vbox=%p\n", __LINE__, vbox));
    if (!vbox->isr_installed)
        return;
    drm_irq_uninstall(vbox->dev);
    flush_work(&vbox->hotplug_work);
    vbox->isr_installed = false;
}
/* $Id: vbox_main.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_main.c
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include "vbox_drv.h"
#include <VBox/VBoxVideoGuest.h>
#include <VBox/VBoxVideo.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_crtc_helper.h>
/* Release a user-created framebuffer: drop our GEM object reference and
 * unregister the framebuffer from the DRM core before freeing it. */
static void vbox_user_framebuffer_destroy(struct drm_framebuffer *fb)
{
    struct vbox_framebuffer *vfb = to_vbox_framebuffer(fb);

    if (vfb->obj)
        drm_gem_object_unreference_unlocked(vfb->obj);
    LogFunc(("vboxvideo: %d: vbox_fb=%p, vbox_fb->obj=%p\n", __LINE__,
             vfb, vfb->obj));
    drm_framebuffer_cleanup(fb);
    kfree(fb);
}
/** Enable VBVA (VirtualBox Video Acceleration) on every CRTC that does not
 * already have it active.  The per-screen command buffers live just past the
 * usable VRAM area, one VBVA_MIN_BUFFER_SIZE slot per CRTC, as laid out by
 * vbox_accel_init(). */
void vbox_enable_accel(struct vbox_private *vbox)
{
unsigned i;
struct VBVABUFFER *vbva;
/* vbox_accel_init() must have allocated the context array first. */
AssertLogRelReturnVoid(vbox->vbva_info != NULL);
for (i = 0; i < vbox->num_crtcs; ++i) {
/* pVBVA is non-NULL once this screen's VBVA has been enabled. */
if (vbox->vbva_info[i].pVBVA == NULL) {
LogFunc(("vboxvideo: enabling VBVA.\n"));
/* Command buffer for screen i: at the end of usable VRAM. */
vbva = (struct VBVABUFFER *) ( ((uint8_t *)vbox->vram)
+ vbox->vram_size
+ i * VBVA_MIN_BUFFER_SIZE);
if (!VBoxVBVAEnable(&vbox->vbva_info[i], &vbox->submit_info, vbva, i))
AssertReleaseMsgFailed(("VBoxVBVAEnable failed - heap allocation error, very old host or driver error.\n"));
}
}
}
/* Tell the host to stop VBVA acceleration on every virtual screen. */
void vbox_disable_accel(struct vbox_private *vbox)
{
    unsigned screen;

    for (screen = 0; screen < vbox->num_crtcs; ++screen)
        VBoxVBVADisable(&vbox->vbva_info[screen], &vbox->submit_info, screen);
}
/* Report the guest capabilities we support to the host.  Mode hints are only
 * advertised once the initial mode has been queried (see vbox_get_modes()). */
void vbox_enable_caps(struct vbox_private *vbox)
{
    uint32_t caps;

    caps = VBVACAPS_DISABLE_CURSOR_INTEGRATION | VBVACAPS_IRQ
         | VBVACAPS_USE_VBVA_ONLY;
    if (vbox->initial_mode_queried)
        caps |= VBVACAPS_VIDEO_MODE_HINTS;
    VBoxHGSMISendCapsInfo(&vbox->submit_info, caps);
}
/** Send information about dirty rectangles to VBVA. If necessary we enable
 * VBVA first, as this is normally disabled after a change of master in case
 * the new master does not send dirty rectangle information (is this even
 * allowed?) */
void vbox_framebuffer_dirty_rectangles(struct drm_framebuffer *fb,
struct drm_clip_rect *rects,
unsigned num_rects)
{
struct vbox_private *vbox = fb->dev->dev_private;
unsigned i;
LogFunc(("vboxvideo: %d: fb=%p, num_rects=%u, vbox=%p\n", __LINE__, fb,
num_rects, vbox));
vbox_enable_accel(vbox);
mutex_lock(&vbox->hw_mutex);
for (i = 0; i < num_rects; ++i)
{
struct drm_crtc *crtc;
/* Report each rectangle to every CRTC currently scanning out this
 * framebuffer whose visible area it intersects. */
list_for_each_entry(crtc, &fb->dev->mode_config.crtc_list, head)
{
unsigned crtc_id = to_vbox_crtc(crtc)->crtc_id;
VBVACMDHDR cmd_hdr;
/* Skip CRTCs showing a different fb or not overlapping the rect. */
if ( CRTC_FB(crtc) != fb
|| rects[i].x1 > crtc->x
+ crtc->hwmode.hdisplay
|| rects[i].y1 > crtc->y
+ crtc->hwmode.vdisplay
|| rects[i].x2 < crtc->x
|| rects[i].y2 < crtc->y)
continue;
/* Rectangle in framebuffer coordinates, as the host expects. */
cmd_hdr.x = (int16_t)rects[i].x1;
cmd_hdr.y = (int16_t)rects[i].y1;
cmd_hdr.w = (uint16_t)rects[i].x2 - rects[i].x1;
cmd_hdr.h = (uint16_t)rects[i].y2 - rects[i].y1;
/* Begin/end bracket each command; failure to begin means VBVA is
 * unavailable for this screen, so simply skip the report. */
if (VBoxVBVABufferBeginUpdate(&vbox->vbva_info[crtc_id],
&vbox->submit_info))
{
VBoxVBVAWrite(&vbox->vbva_info[crtc_id], &vbox->submit_info, &cmd_hdr,
sizeof(cmd_hdr));
VBoxVBVABufferEndUpdate(&vbox->vbva_info[crtc_id]);
}
}
}
mutex_unlock(&vbox->hw_mutex);
LogFunc(("vboxvideo: %d\n", __LINE__));
}
/* DRM .dirty callback: forward the rectangle list to the VBVA reporting
 * code.  The file/flags/colour arguments are not needed by our hardware. */
static int vbox_user_framebuffer_dirty(struct drm_framebuffer *fb,
                                       struct drm_file *file_priv,
                                       unsigned flags, unsigned color,
                                       struct drm_clip_rect *rects,
                                       unsigned num_rects)
{
    LogFunc(("vboxvideo: %d, flags=%u\n", __LINE__, flags));
    vbox_framebuffer_dirty_rectangles(fb, rects, num_rects);
    return 0;
}
/** Framebuffer vtable: destruction and dirty-rectangle reporting. */
static const struct drm_framebuffer_funcs vbox_fb_funcs = {
.destroy = vbox_user_framebuffer_destroy,
.dirty = vbox_user_framebuffer_dirty,
};
/** Initialise a vbox framebuffer wrapper around a GEM object and register
 * it with the DRM core.
 * Returns 0 on success or the negative error from drm_framebuffer_init(). */
int vbox_framebuffer_init(struct drm_device *dev,
struct vbox_framebuffer *vbox_fb,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
const
#endif
struct DRM_MODE_FB_CMD *mode_cmd,
struct drm_gem_object *obj)
{
int ret;
LogFunc(("vboxvideo: %d: dev=%p, vbox_fb=%p, obj=%p\n", __LINE__, dev,
vbox_fb, obj));
/* Copy size/format information from the mode command into the fb. */
drm_helper_mode_fill_fb_struct(&vbox_fb->base, mode_cmd);
vbox_fb->obj = obj;
ret = drm_framebuffer_init(dev, &vbox_fb->base, &vbox_fb_funcs);
if (ret) {
DRM_ERROR("framebuffer init failed %d\n", ret);
LogFunc(("vboxvideo: %d\n", __LINE__));
return ret;
}
LogFunc(("vboxvideo: %d\n", __LINE__));
return 0;
}
/** .fb_create callback: look up the GEM object for the first handle and wrap
 * it in a newly allocated vbox framebuffer.  On any failure the GEM
 * reference taken by the lookup is dropped again. */
static struct drm_framebuffer *
vbox_user_framebuffer_create(struct drm_device *dev,
struct drm_file *filp,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
const
#endif
struct drm_mode_fb_cmd2 *mode_cmd)
{
struct drm_gem_object *obj;
struct vbox_framebuffer *vbox_fb;
int ret;
LogFunc(("vboxvideo: %d\n", __LINE__));
obj = drm_gem_object_lookup(dev, filp, mode_cmd->handles[0]);
if (obj == NULL)
return ERR_PTR(-ENOENT);
vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
if (!vbox_fb) {
drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(-ENOMEM);
}
ret = vbox_framebuffer_init(dev, vbox_fb, mode_cmd, obj);
if (ret) {
drm_gem_object_unreference_unlocked(obj);
kfree(vbox_fb);
return ERR_PTR(ret);
}
LogFunc(("vboxvideo: %d\n", __LINE__));
return &vbox_fb->base;
}
/** Mode configuration vtable: only user framebuffer creation is needed. */
static const struct drm_mode_config_funcs vbox_mode_funcs = {
.fb_create = vbox_user_framebuffer_create,
};
/* Disable VBVA acceleration and release the per-screen context array.
 * A no-op if acceleration was never initialised. */
static void vbox_accel_fini(struct vbox_private *vbox)
{
    if (!vbox->vbva_info)
        return;
    vbox_disable_accel(vbox);
    kfree(vbox->vbva_info);
    vbox->vbva_info = NULL;
}
/** Set up the per-CRTC VBVA buffer contexts.
 *
 * Reserves one VBVA_MIN_BUFFER_SIZE command buffer per screen at the end of
 * usable VRAM (shrinking vbox->vram_size accordingly) and initialises a
 * buffer context pointing at each.
 *
 * @returns 0 on success, -ENOMEM if the context array cannot be allocated.
 */
static int vbox_accel_init(struct vbox_private *vbox)
{
    unsigned i;

    LogFunc(("vboxvideo: %d: vbox=%p, vbox->num_crtcs=%u, vbox->vbva_info=%p\n",
             __LINE__, vbox, (unsigned)vbox->num_crtcs, vbox->vbva_info));
    if (!vbox->vbva_info) {
        /* kcalloc checks the count * size multiplication for overflow,
         * unlike the open-coded kzalloc(a * b) form. */
        vbox->vbva_info = kcalloc(vbox->num_crtcs,
                                  sizeof(struct VBVABUFFERCONTEXT),
                                  GFP_KERNEL);
        if (!vbox->vbva_info)
            return -ENOMEM;
    }
    /* Take a command buffer for each screen from the end of usable VRAM. */
    vbox->vram_size -= vbox->num_crtcs * VBVA_MIN_BUFFER_SIZE;
    for (i = 0; i < vbox->num_crtcs; ++i)
        VBoxVBVASetupBufferContext(&vbox->vbva_info[i],
                                   vbox->vram_size + i * VBVA_MIN_BUFFER_SIZE,
                                   VBVA_MIN_BUFFER_SIZE);
    LogFunc(("vboxvideo: %d: vbox->vbva_info=%p, vbox->vram_size=%u\n",
             __LINE__, vbox->vbva_info, (unsigned)vbox->vram_size));
    return 0;
}
/** HGSMI heap allocation callback: a plain kernel-heap allocation.  The
 * environment pointer supplied at setup time is not used. */
static DECLCALLBACK(void *) alloc_hgsmi_environ(void *env, HGSMISIZE size)
{
    NOREF(env);
    return kmalloc(size, GFP_KERNEL);
}
/** HGSMI heap free callback, matching alloc_hgsmi_environ(). */
static DECLCALLBACK(void) free_hgsmi_environ(void *env, void *ptr)
{
    NOREF(env);
    kfree(ptr);
}
/** Pointers to the HGSMI heap and data manipulation functions.
 * Passed to VBoxHGSMISetupGuestContext(); the first member is the opaque
 * environment pointer handed back to the callbacks (unused, hence NULL). */
static HGSMIENV hgsmi_environ =
{
NULL,
alloc_hgsmi_environ,
free_hgsmi_environ
};
/** Do we support the 4.3 plus mode hint reporting interface? */
static bool have_hgsmi_mode_hints(struct vbox_private *vbox)
{
uint32_t have_hints, have_cursor;
return RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_MODE_HINT_REPORTING, &have_hints))
&& RT_SUCCESS(VBoxQueryConfHGSMI(&vbox->submit_info, VBOX_VBVA_CONF32_GUEST_CURSOR_REPORTING, &have_cursor))
&& have_hints == VINF_SUCCESS
&& have_cursor == VINF_SUCCESS;
}
/** Set up our heaps and data exchange buffers in VRAM before handing the rest
 * to the memory manager.
 *
 * @returns 0 on success, -ENOMEM on allocation failure, or -ENOTSUPP if the
 *          host lacks the 4.3+ mode hint reporting interface we rely on.
 */
static int vbox_hw_init(struct vbox_private *vbox)
{
    uint32_t base_offset, guest_heap_offset, guest_heap_size, host_flags_offset;
    void *guest_heap;

    vbox->full_vram_size = VBoxVideoGetVRAMSize();
    vbox->any_pitch = VBoxVideoAnyWidthAllowed();
    DRM_INFO("VRAM %08x\n", vbox->full_vram_size);
    VBoxHGSMIGetBaseMappingInfo(vbox->full_vram_size, &base_offset, NULL,
                                &guest_heap_offset, &guest_heap_size,
                                &host_flags_offset);
    guest_heap = ((uint8_t *)vbox->vram) + base_offset + guest_heap_offset;
    vbox->host_flags_offset = base_offset + host_flags_offset;
    if (RT_FAILURE(VBoxHGSMISetupGuestContext(&vbox->submit_info, guest_heap,
                                              guest_heap_size,
                                              base_offset + guest_heap_offset,
                                              &hgsmi_environ)))
        return -ENOMEM;
    /* Reduce available VRAM size to reflect the guest heap. */
    vbox->vram_size = base_offset;
    /* Linux drm represents monitors as a 32-bit array. */
    vbox->num_crtcs = RT_MIN(VBoxHGSMIGetMonitorCount(&vbox->submit_info), 32);
    if (!have_hgsmi_mode_hints(vbox))
        return -ENOTSUPP;
    /* kcalloc checks the count * size multiplication for overflow, unlike
     * the open-coded kzalloc(a * b) form. */
    vbox->last_mode_hints = kcalloc(vbox->num_crtcs, sizeof(VBVAMODEHINT),
                                    GFP_KERNEL);
    if (!vbox->last_mode_hints)
        return -ENOMEM;
    return vbox_accel_init(vbox);
}
/* Tear down the acceleration contexts and free the stored mode hints. */
static void vbox_hw_fini(struct vbox_private *vbox)
{
    vbox_accel_fini(vbox);
    /* kfree(NULL) is a no-op, so no need to test the pointer first. */
    kfree(vbox->last_mode_hints);
    vbox->last_mode_hints = NULL;
}
/** DRM driver load callback: allocate the private structure and bring up
 * HGSMI, the memory manager, mode setting, interrupts and the fbdev
 * emulation, in that order.  On any failure vbox_driver_unload() is used
 * for cleanup of whatever was already initialised.
 * Returns 0 on success or a negative errno value. */
int vbox_driver_load(struct drm_device *dev, unsigned long flags)
{
struct vbox_private *vbox;
int ret = 0;
LogFunc(("vboxvideo: %d: dev=%p\n", __LINE__, dev));
if (!VBoxHGSMIIsSupported())
return -ENODEV;
vbox = kzalloc(sizeof(struct vbox_private), GFP_KERNEL);
if (!vbox)
return -ENOMEM;
dev->dev_private = vbox;
vbox->dev = dev;
mutex_init(&vbox->hw_mutex);
/* I hope this won't interfere with the memory manager. */
vbox->vram = pci_iomap(dev->pdev, 0, 0);
if (!vbox->vram) {
ret = -EIO;
goto out_free;
}
ret = vbox_hw_init(vbox);
if (ret)
goto out_free;
ret = vbox_mm_init(vbox);
if (ret)
goto out_free;
drm_mode_config_init(dev);
dev->mode_config.funcs = (void *)&vbox_mode_funcs;
/* Mode size limits we will accept from userspace. */
dev->mode_config.min_width = 64;
dev->mode_config.min_height = 64;
dev->mode_config.preferred_depth = 24;
dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
ret = vbox_mode_init(dev);
if (ret)
goto out_free;
ret = vbox_irq_init(vbox);
if (ret)
goto out_free;
ret = vbox_fbdev_init(dev);
if (ret)
goto out_free;
LogFunc(("vboxvideo: %d: vbox=%p, vbox->vram=%p, vbox->full_vram_size=%u\n",
__LINE__, vbox, vbox->vram, (unsigned)vbox->full_vram_size));
return 0;
out_free:
/* Unload copes with the partially-initialised states reached above. */
vbox_driver_unload(dev);
LogFunc(("vboxvideo: %d: ret=%d\n", __LINE__, ret));
return ret;
}
/** DRM driver unload callback; also used by vbox_driver_load() for cleanup
 * on partial failure.  NOTE(review): this assumes vbox_fbdev_fini(),
 * vbox_mode_fini() and vbox_mm_fini() tolerate never-initialised state —
 * verify against their definitions. */
int vbox_driver_unload(struct drm_device *dev)
{
struct vbox_private *vbox = dev->dev_private;
LogFunc(("vboxvideo: %d\n", __LINE__));
vbox_fbdev_fini(dev);
vbox_irq_fini(vbox);
vbox_mode_fini(dev);
/* funcs is only set once drm_mode_config_init() has run. */
if (dev->mode_config.funcs)
drm_mode_config_cleanup(dev);
vbox_hw_fini(vbox);
vbox_mm_fini(vbox);
if (vbox->vram)
pci_iounmap(dev->pdev, vbox->vram);
kfree(vbox);
dev->dev_private = NULL;
LogFunc(("vboxvideo: %d\n", __LINE__));
return 0;
}
/** @note this is described in the DRM framework documentation. AST does not
 * have it, but we get an oops on driver unload if it is not present.
 * Restores the fbdev console mode when the last DRM client closes.  Kernels
 * before 3.16 lack the _unlocked variant, so we take the mode_config mutex
 * ourselves there. */
void vbox_driver_lastclose(struct drm_device *dev)
{
struct vbox_private *vbox = dev->dev_private;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0)
if (vbox->fbdev)
drm_fb_helper_restore_fbdev_mode_unlocked(&vbox->fbdev->helper);
#else
mutex_lock(&dev->mode_config.mutex);
if (vbox->fbdev)
drm_fb_helper_restore_fbdev_mode(&vbox->fbdev->helper);
mutex_unlock(&dev->mode_config.mutex);
#endif
}
/** Create a GEM-backed buffer object of at least @a size bytes (rounded up
 * to a whole number of pages).
 *
 * @param dev       the DRM device.
 * @param size      requested size in bytes; must round up to non-zero.
 * @param iskernel  NOTE(review): currently unused by this implementation.
 * @param obj       where to store the new GEM object; NULL on failure.
 * @returns 0 on success or a negative errno value.
 */
int vbox_gem_create(struct drm_device *dev,
                    u32 size, bool iskernel,
                    struct drm_gem_object **obj)
{
    struct vbox_bo *vboxbo;
    int ret;

    LogFunc(("vboxvideo: %d: dev=%p, size=%u, iskernel=%u\n", __LINE__,
             dev, (unsigned)size, (unsigned)iskernel));
    *obj = NULL;
    size = roundup(size, PAGE_SIZE);
    if (size == 0)
        return -EINVAL;
    ret = vbox_bo_create(dev, size, 0, 0, &vboxbo);
    if (ret) {
        if (ret != -ERESTARTSYS)
            DRM_ERROR("failed to allocate GEM object\n");
        return ret;
    }
    *obj = &vboxbo->gem;
    /* Log the created object itself, not the address of the caller's out
     * parameter as the original code did. */
    LogFunc(("vboxvideo: %d: obj=%p\n", __LINE__, *obj));
    return 0;
}
int vbox_dumb_create(struct drm_file *file,
struct drm_device *dev,
struct drm_mode_create_dumb *args)
{
int ret;
struct drm_gem_object *gobj;
u32 handle;
LogFunc(("vboxvideo: %d: args->width=%u, args->height=%u, args->bpp=%u\n",
__LINE__, (unsigned)args->width, (unsigned)args->height,
(unsigned)args->bpp));
args->pitch = args->width * ((args->bpp + 7) / 8);
args->size = args->pitch * args->height;
ret = vbox_gem_create(dev, args->size, false,
&gobj);
if (ret)
return ret;
ret = drm_gem_handle_create(file, gobj, &handle);
drm_gem_object_unreference_unlocked(gobj);
if (ret)
return ret;
args->handle = handle;
LogFunc(("vboxvideo: %d: args->handle=%u\n", __LINE__,
(unsigned)args->handle));
return 0;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
/** Dumb buffer destroy callback, only built for pre-3.12 kernels: simply
 * drop the GEM handle. */
int vbox_dumb_destroy(struct drm_file *file,
struct drm_device *dev,
uint32_t handle)
{
LogFunc(("vboxvideo: %d: dev=%p, handle=%u\n", __LINE__, dev,
(unsigned)handle));
return drm_gem_handle_delete(file, handle);
}
#endif
/** Drop one reference to a vbox buffer object and clear the caller's
 * pointer once the underlying TTM object has been released.
 * NOTE(review): relies on ttm_bo_unref() setting tbo to NULL when the last
 * reference is dropped — confirm against the TTM version in use. */
static void vbox_bo_unref(struct vbox_bo **bo)
{
struct ttm_buffer_object *tbo;
if ((*bo) == NULL)
return;
LogFunc(("vboxvideo: %d: bo=%p\n", __LINE__, bo));
tbo = &((*bo)->bo);
ttm_bo_unref(&tbo);
if (tbo == NULL)
*bo = NULL;
}
/* GEM free callback: release our reference to the underlying TTM object. */
void vbox_gem_free_object(struct drm_gem_object *obj)
{
    struct vbox_bo *bo = gem_to_vbox_bo(obj);

    LogFunc(("vboxvideo: %d: vbox_bo=%p\n", __LINE__, bo));
    vbox_bo_unref(&bo);
}
/** Return the fake mmap offset for a buffer object, using whichever API the
 * running kernel provides (addr_space_offset was replaced by the VMA
 * manager in 3.12). */
static inline u64 vbox_bo_mmap_offset(struct vbox_bo *bo)
{
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
return bo->bo.addr_space_offset;
#else
return drm_vma_node_offset_addr(&bo->bo.vma_node);
#endif
}
/** Look up the mmap offset for a dumb buffer handle (ioctl entry point).
 * The GEM reference taken by the lookup is dropped again before returning;
 * struct_mutex is held around the whole operation. */
int
vbox_dumb_mmap_offset(struct drm_file *file,
struct drm_device *dev,
uint32_t handle,
uint64_t *offset)
{
struct drm_gem_object *obj;
int ret;
struct vbox_bo *bo;
LogFunc(("vboxvideo: %d: dev=%p, handle=%u\n", __LINE__,
dev, (unsigned)handle));
mutex_lock(&dev->struct_mutex);
obj = drm_gem_object_lookup(dev, file, handle);
if (obj == NULL) {
ret = -ENOENT;
goto out_unlock;
}
bo = gem_to_vbox_bo(obj);
*offset = vbox_bo_mmap_offset(bo);
drm_gem_object_unreference(obj);
ret = 0;
LogFunc(("vboxvideo: %d: bo=%p, *offset=%llu\n", __LINE__,
bo, (unsigned long long)*offset));
out_unlock:
mutex_unlock(&dev->struct_mutex);
return ret;
}
/* $Id: vbox_mode.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_mode.c
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
* Parts based on xf86-video-ast
* Copyright (c) 2005 ASPEED Technology Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include "vbox_drv.h"
#include <VBox/VBoxVideo.h>
#include <linux/export.h>
#include <drm/drm_crtc_helper.h>
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
# include <drm/drm_plane_helper.h>
#endif
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
uint32_t handle, uint32_t width, uint32_t height,
int32_t hot_x, int32_t hot_y);
static int vbox_cursor_move(struct drm_crtc *crtc, int x, int y);
/** Set a graphics mode. Poke any required values into registers, do an HGSMI
 * mode set and tell the host we support advanced graphics functions.
 * Both callers (vbox_crtc_dpms() and vbox_crtc_mode_set()) hold
 * vbox->hw_mutex around this call. */
static void vbox_do_modeset(struct drm_crtc *crtc,
const struct drm_display_mode *mode)
{
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
struct vbox_private *vbox;
int width, height, bpp, pitch;
unsigned crtc_id;
uint16_t flags;
LogFunc(("vboxvideo: %d: vbox_crtc=%p, CRTC_FB(crtc)=%p\n", __LINE__,
vbox_crtc, CRTC_FB(crtc)));
vbox = crtc->dev->dev_private;
/* Fall back to 640x480x32 values when the CRTC is not enabled. */
width = mode->hdisplay ? mode->hdisplay : 640;
height = mode->vdisplay ? mode->vdisplay : 480;
crtc_id = vbox_crtc->crtc_id;
bpp = crtc->enabled ? CRTC_FB(crtc)->bits_per_pixel : 32;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
pitch = crtc->enabled ? CRTC_FB(crtc)->pitch : width * bpp / 8;
#else
pitch = crtc->enabled ? CRTC_FB(crtc)->pitches[0] : width * bpp / 8;
#endif
/* This is the old way of setting graphics modes. It assumed one screen
* and a frame-buffer at the start of video RAM. On older versions of
* VirtualBox, certain parts of the code still assume that the first
* screen is programmed this way, so try to fake it. */
if ( vbox_crtc->crtc_id == 0
&& crtc->enabled
&& vbox_crtc->fb_offset / pitch < 0xffff - crtc->y
&& vbox_crtc->fb_offset % (bpp / 8) == 0)
VBoxVideoSetModeRegisters(width, height, pitch * 8 / bpp,
CRTC_FB(crtc)->bits_per_pixel, 0,
vbox_crtc->fb_offset % pitch / bpp * 8 + crtc->x,
vbox_crtc->fb_offset / pitch + crtc->y);
/* Report the screen state to the host: blanked and/or disconnected. */
flags = VBVA_SCREEN_F_ACTIVE;
flags |= (crtc->enabled && !vbox_crtc->blanked ? 0 : VBVA_SCREEN_F_BLANK);
flags |= (vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0);
VBoxHGSMIProcessDisplayInfo(&vbox->submit_info, vbox_crtc->crtc_id,
crtc->x, crtc->y,
crtc->x * bpp / 8 + crtc->y * pitch,
pitch, width, height,
vbox_crtc->blanked ? 0 : bpp, flags);
VBoxHGSMIReportFlagsLocation(&vbox->submit_info, vbox->host_flags_offset);
LogFunc(("vboxvideo: %d\n", __LINE__));
}
static int vbox_set_view(struct drm_crtc *crtc)
{
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
struct vbox_private *vbox = crtc->dev->dev_private;
void *p;
LogFunc(("vboxvideo: %d: vbox_crtc=%p\n", __LINE__, vbox_crtc));
/* Tell the host about the view. This design originally targeted the
* Windows XP driver architecture and assumed that each screen would have
* a dedicated frame buffer with the command buffer following it, the whole
* being a "view". The host works out which screen a command buffer belongs
* to by checking whether it is in the first view, then whether it is in the
* second and so on. The first match wins. We cheat around this by making
* the first view be the managed memory plus the first command buffer, the
* second the same plus the second buffer and so on. */
p = VBoxHGSMIBufferAlloc(&vbox->submit_info, sizeof(VBVAINFOVIEW), HGSMI_CH_VBVA,
VBVA_INFO_VIEW);
if (p)
{
VBVAINFOVIEW *pInfo = (VBVAINFOVIEW *)p;
pInfo->u32ViewIndex = vbox_crtc->crtc_id;
pInfo->u32ViewOffset = vbox_crtc->fb_offset;
pInfo->u32ViewSize = vbox->vram_size - vbox_crtc->fb_offset
+ vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
pInfo->u32MaxScreenSize = vbox->vram_size - vbox_crtc->fb_offset;
VBoxHGSMIBufferSubmit(&vbox->submit_info, p);
VBoxHGSMIBufferFree(&vbox->submit_info, p);
}
else
return -ENOMEM;
LogFunc(("vboxvideo: %d: p=%p\n", __LINE__, p));
return 0;
}
/* No colour lookup table on the virtual hardware: gamma updates are
 * silently ignored. */
static void vbox_crtc_load_lut(struct drm_crtc *crtc)
{
}
/* DPMS handling: any state other than "on" blanks the virtual screen; the
 * host is informed by re-running the mode set with the new blanked flag. */
static void vbox_crtc_dpms(struct drm_crtc *crtc, int mode)
{
    struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
    struct vbox_private *vbox = crtc->dev->dev_private;

    LogFunc(("vboxvideo: %d: vbox_crtc=%p, mode=%d\n", __LINE__, vbox_crtc,
             mode));
    switch (mode) {
    case DRM_MODE_DPMS_STANDBY:
    case DRM_MODE_DPMS_SUSPEND:
    case DRM_MODE_DPMS_OFF:
        vbox_crtc->blanked = true;
        break;
    case DRM_MODE_DPMS_ON:
        vbox_crtc->blanked = false;
        break;
    }
    mutex_lock(&vbox->hw_mutex);
    vbox_do_modeset(crtc, &crtc->hwmode);
    mutex_unlock(&vbox->hw_mutex);
    LogFunc(("vboxvideo: %d\n", __LINE__));
}
/* We accept every requested mode unchanged: no fixup needed. */
static bool vbox_crtc_mode_fixup(struct drm_crtc *crtc,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
/* We move buffers which are not in active use out of VRAM to save memory. */
/** Pin the CRTC's new framebuffer into VRAM, record its offset and, for the
 * first CRTC, the input mapping size.  When @a atomic is zero the previous
 * framebuffer @a fb is first pushed out to system RAM.
 * Returns 0 on success or a negative errno from the reserve/pin steps. */
static int vbox_crtc_do_set_base(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int x, int y, int atomic)
{
struct vbox_private *vbox = crtc->dev->dev_private;
struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
struct drm_gem_object *obj;
struct vbox_framebuffer *vbox_fb;
struct vbox_bo *bo;
int ret;
u64 gpu_addr;
LogFunc(("vboxvideo: %d: fb=%p, vbox_crtc=%p\n", __LINE__, fb, vbox_crtc));
/* push the previous fb to system ram */
if (!atomic && fb) {
vbox_fb = to_vbox_framebuffer(fb);
obj = vbox_fb->obj;
bo = gem_to_vbox_bo(obj);
ret = vbox_bo_reserve(bo, false);
if (ret)
return ret;
vbox_bo_push_sysram(bo);
vbox_bo_unreserve(bo);
}
/* Pin the new framebuffer's buffer object into VRAM. */
vbox_fb = to_vbox_framebuffer(CRTC_FB(crtc));
obj = vbox_fb->obj;
bo = gem_to_vbox_bo(obj);
ret = vbox_bo_reserve(bo, false);
if (ret)
return ret;
ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM, &gpu_addr);
if (ret) {
vbox_bo_unreserve(bo);
return ret;
}
if (&vbox->fbdev->afb == vbox_fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
/* Non-fatal: only the kernel map for the console is missing. */
DRM_ERROR("failed to kmap fbcon\n");
}
vbox_bo_unreserve(bo);
/* vbox_set_start_address_crt1(crtc, (u32)gpu_addr); */
vbox_crtc->fb_offset = gpu_addr;
if (vbox_crtc->crtc_id == 0) {
/* Pointer input is mapped relative to the first screen's fb size. */
vbox->input_mapping_width = CRTC_FB(crtc)->width;
vbox->input_mapping_height = CRTC_FB(crtc)->height;
}
LogFunc(("vboxvideo: %d: vbox_fb=%p, obj=%p, bo=%p, gpu_addr=%u\n",
__LINE__, vbox_fb, obj, bo, (unsigned)gpu_addr));
return 0;
}
/* Non-atomic wrapper around vbox_crtc_do_set_base(): re-pin the scanout
 * buffer, pushing the previous framebuffer out of VRAM. */
static int vbox_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
                                   struct drm_framebuffer *old_fb)
{
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return vbox_crtc_do_set_base(crtc, old_fb, x, y, 0);
}
/** Mode-set callback: pin the new framebuffer, update the host's view of
 * this screen, program the mode and refresh the pointer input mapping.
 *
 * @returns 0 on success or a negative errno value.
 */
static int vbox_crtc_mode_set(struct drm_crtc *crtc,
                              struct drm_display_mode *mode,
                              struct drm_display_mode *adjusted_mode,
                              int x, int y,
                              struct drm_framebuffer *old_fb)
{
    struct vbox_private *vbox = crtc->dev->dev_private;
    int rc;

    LogFunc(("vboxvideo: %d: vbox=%p\n", __LINE__, vbox));
    /* The original code discarded this result; if pinning the framebuffer
     * fails, fb_offset is stale and the host would be told nonsense, so
     * propagate the error instead. */
    rc = vbox_crtc_mode_set_base(crtc, x, y, old_fb);
    if (rc)
        return rc;
    mutex_lock(&vbox->hw_mutex);
    rc = vbox_set_view(crtc);
    if (!rc)
        vbox_do_modeset(crtc, mode);
    /* Note that the input mapping is always relative to the first screen. */
    VBoxHGSMIUpdateInputMapping(&vbox->submit_info, 0, 0,
                                vbox->input_mapping_width,
                                vbox->input_mapping_height);
    mutex_unlock(&vbox->hw_mutex);
    LogFunc(("vboxvideo: %d\n", __LINE__));
    return rc;
}
/* Nothing to do on the virtual hardware to disable a CRTC. */
static void vbox_crtc_disable(struct drm_crtc *crtc)
{
}
/* No preparation needed before a mode set. */
static void vbox_crtc_prepare(struct drm_crtc *crtc)
{
}
/* No commit step needed after a mode set. */
static void vbox_crtc_commit(struct drm_crtc *crtc)
{
}
/** CRTC helper vtable.  mode_set_base is invoked from within
 * vbox_crtc_mode_set(), hence the commented-out entry. */
static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
.dpms = vbox_crtc_dpms,
.mode_fixup = vbox_crtc_mode_fixup,
.mode_set = vbox_crtc_mode_set,
/* .mode_set_base = vbox_crtc_mode_set_base, */
.disable = vbox_crtc_disable,
.load_lut = vbox_crtc_load_lut,
.prepare = vbox_crtc_prepare,
.commit = vbox_crtc_commit,
};
/* No hardware state to reset on the virtual device. */
static void vbox_crtc_reset(struct drm_crtc *crtc)
{
}
/* Unregister the CRTC from the core and free our wrapper structure. */
static void vbox_crtc_destroy(struct drm_crtc *crtc)
{
drm_crtc_cleanup(crtc);
kfree(crtc);
}
/** CRTC vtable; the cursor handlers are defined later in this file. */
static const struct drm_crtc_funcs vbox_crtc_funcs = {
.cursor_move = vbox_cursor_move,
.cursor_set2 = vbox_cursor_set2,
.reset = vbox_crtc_reset,
.set_config = drm_crtc_helper_set_config,
/* .gamma_set = vbox_crtc_gamma_set, */
.destroy = vbox_crtc_destroy,
};
/** Allocate and register the CRTC for virtual screen @a i.
 *
 * @returns the new CRTC wrapper, or NULL on failure.
 */
static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned i)
{
    struct vbox_crtc *vbox_crtc;

    LogFunc(("vboxvideo: %d\n", __LINE__));
    vbox_crtc = kzalloc(sizeof(struct vbox_crtc), GFP_KERNEL);
    if (!vbox_crtc)
        return NULL;
    vbox_crtc->crtc_id = i;
    /* drm_crtc_init() can fail; the original code ignored that and would
     * have leaked the wrapper and registered helpers on a dead CRTC. */
    if (drm_crtc_init(dev, &vbox_crtc->base, &vbox_crtc_funcs)) {
        kfree(vbox_crtc);
        return NULL;
    }
    drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
    drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
    LogFunc(("vboxvideo: %d: crtc=%p\n", __LINE__, vbox_crtc));
    return vbox_crtc;
}
/* Encoder destruction: unregister from the core and free the wrapper. */
static void vbox_encoder_destroy(struct drm_encoder *encoder)
{
    LogFunc(("vboxvideo: %d: encoder=%p\n", __LINE__, encoder));
    drm_encoder_cleanup(encoder);
    kfree(encoder);
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0)
/** Compatibility shim for kernels without drm_encoder_find(): look up a
 * mode object of encoder type by id. */
static struct drm_encoder *drm_encoder_find(struct drm_device *dev, uint32_t id)
{
struct drm_mode_object *mo;
mo = drm_mode_object_find(dev, id, DRM_MODE_OBJECT_ENCODER);
return mo ? obj_to_encoder(mo) : NULL;
}
#endif
/* Return the single encoder wired to this connector, or NULL if none. */
static struct drm_encoder *vbox_best_single_encoder(struct drm_connector *connector)
{
    int id = connector->encoder_ids[0];

    LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
    if (!id) {
        LogFunc(("vboxvideo: %d\n", __LINE__));
        return NULL;
    }
    return drm_encoder_find(connector->dev, id);
}
/** Encoder vtable: only destruction needs real work. */
static const struct drm_encoder_funcs vbox_enc_funcs = {
.destroy = vbox_encoder_destroy,
};
/* The virtual encoder needs no DPMS handling. */
static void vbox_encoder_dpms(struct drm_encoder *encoder, int mode)
{
}
/* Accept the mode as-is; no adjustment needed. */
static bool vbox_mode_fixup(struct drm_encoder *encoder,
const struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
return true;
}
/* Nothing to program on the encoder for a mode set. */
static void vbox_encoder_mode_set(struct drm_encoder *encoder,
struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode)
{
}
/* No preparation needed before a mode set. */
static void vbox_encoder_prepare(struct drm_encoder *encoder)
{
}
/* No commit step needed after a mode set. */
static void vbox_encoder_commit(struct drm_encoder *encoder)
{
}
/** Encoder helper vtable: all operations are no-ops on this hardware. */
static const struct drm_encoder_helper_funcs vbox_enc_helper_funcs = {
.dpms = vbox_encoder_dpms,
.mode_fixup = vbox_mode_fixup,
.prepare = vbox_encoder_prepare,
.commit = vbox_encoder_commit,
.mode_set = vbox_encoder_mode_set,
};
/** Allocate and register the DAC encoder for virtual screen @a i.
 *
 * @returns the new encoder, or NULL if allocation fails.
 */
static struct drm_encoder *vbox_encoder_init(struct drm_device *dev, unsigned i)
{
    struct vbox_encoder *vbox_encoder;

    /* Fix: the original format string was "dev=%d" with no matching
     * argument — undefined behaviour in the printf-style expansion. */
    LogFunc(("vboxvideo: %d: dev=%p\n", __LINE__, dev));
    vbox_encoder = kzalloc(sizeof(struct vbox_encoder), GFP_KERNEL);
    if (!vbox_encoder)
        return NULL;
    drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
                     DRM_MODE_ENCODER_DAC
#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 5, 0)
                     , NULL
#endif
                     );
    drm_encoder_helper_add(&vbox_encoder->base, &vbox_enc_helper_funcs);
    /* This encoder can only drive CRTC number i. */
    vbox_encoder->base.possible_crtcs = 1 << i;
    LogFunc(("vboxvideo: %d: vbox_encoder=%p\n", __LINE__, vbox_encoder));
    return &vbox_encoder->base;
}
/** Generate EDID data with a mode-unique serial number for the virtual
 * monitor to try to persuade Unity that different modes correspond to
 * different monitors and it should not try to force the same resolution on
 * them. */
static void vbox_set_edid(struct drm_connector *connector, int width,
int height)
{
enum { EDID_SIZE = 128 };
unsigned char edid[EDID_SIZE] = {
0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
0x58, 0x58, /* manufacturer (VBX) */
0x00, 0x00, /* product code */
0x00, 0x00,0x00, 0x00, /* serial number goes here */
0x01, /* week of manufacture */
0x00, /* year of manufacture */
0x01, 0x03, /* EDID version */
0x80, /* capabilities - digital */
0x00, /* horiz. res in cm, zero for projectors */
0x00, /* vert. res in cm */
0x78, /* display gamma (120 == 2.2). */
0xEE, /* features (standby, suspend, off, RGB, standard colour space,
* preferred timing mode) */
0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
/* chromaticity for standard colour space. */
0x00, 0x00, 0x00, /* no default timings */
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
0x01, 0x01, 0x01, 0x01, /* no standard timings */
0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02, 0x02, 0x02,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* descriptor block 1 goes here */
0x00, 0x00, 0x00, 0xFD, 0x00, /* descriptor block 2, monitor ranges */
0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20, /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
0x00, 0x00, 0x00, 0xFC, 0x00, /* descriptor block 3, monitor name */
'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r', '\n',
0x00, 0x00, 0x00, 0x10, 0x00, /* descriptor block 4: dummy data */
0x0A, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
0x20,
0x00, /* number of extensions */
0x00 /* checksum goes here */
};
/* Pixel clock for the detailed timing descriptor, in EDID 10 kHz units;
* the +6 on each axis presumably allows for blanking — TODO confirm. */
int clock = (width + 6) * (height + 6) * 60 / 10000;
unsigned i;
unsigned sum = 0;
/* Serial number: encode the size so each mode looks like a new monitor. */
edid[12] = width & 0xff;
edid[13] = width >> 8;
edid[14] = height & 0xff;
edid[15] = height >> 8;
/* Patch the detailed timing descriptor with the requested mode. */
edid[54] = clock & 0xff;
edid[55] = clock >> 8;
edid[56] = width & 0xff;
edid[58] = (width >> 4) & 0xf0;
edid[59] = height & 0xff;
edid[61] = (height >> 4) & 0xf0;
/* EDID checksum: all 128 bytes must sum to zero modulo 256. */
for (i = 0; i < EDID_SIZE - 1; ++i)
sum += edid[i];
edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
drm_mode_connector_update_edid_property(connector, (struct edid *)edid);
}
/** Build the mode list for a connector.  Until the initial mode has been
 * queried only standard modes up to 800x600 are offered; afterwards we add
 * standard modes up to 2560x1600 plus a preferred CVT mode built from the
 * host's last size hint, with matching fake EDID.
 * Returns the number of modes added. */
static int vbox_get_modes(struct drm_connector *connector)
{
struct vbox_connector *vbox_connector = NULL;
struct drm_display_mode *mode = NULL;
struct vbox_private *vbox = NULL;
unsigned num_modes = 0;
int preferred_width, preferred_height;
LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
vbox_connector = to_vbox_connector(connector);
vbox = connector->dev->dev_private;
/* Capabilities are only (re-)sent for the first CRTC's connector. */
if (vbox_connector->vbox_crtc->crtc_id == 0)
vbox_enable_caps(vbox);
if (!vbox->initial_mode_queried) {
if (vbox_connector->vbox_crtc->crtc_id == 0) {
vbox->initial_mode_queried = true;
/* Trigger a re-probe now that mode hints will be accepted. */
vbox_report_hotplug(vbox);
}
return drm_add_modes_noedid(connector, 800, 600);
}
num_modes = drm_add_modes_noedid(connector, 2560, 1600);
/* Fall back to 1024x768 if the host has not sent a size hint yet. */
preferred_width = vbox_connector->mode_hint.width ? vbox_connector->mode_hint.width : 1024;
preferred_height = vbox_connector->mode_hint.height ? vbox_connector->mode_hint.height : 768;
mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height, 60, false,
false, false);
if (mode)
{
mode->type |= DRM_MODE_TYPE_PREFERRED;
drm_mode_probed_add(connector, mode);
++num_modes;
}
vbox_set_edid(connector, preferred_width, preferred_height);
return num_modes;
}
/* Every mode is acceptable to the virtual hardware. */
static int vbox_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
return MODE_OK;
}
/** DRM connector .destroy hook: unregister the connector from sysfs,
 * clean up DRM core state and free the containing structure.
 * NOTE(review): kfree() is passed the embedded base connector pointer;
 * this frees the whole struct vbox_connector only if base is its first
 * member -- confirm against the struct vbox_connector layout. */
static void vbox_connector_destroy(struct drm_connector *connector)
{
    struct vbox_connector *vbox_connector = NULL;
    LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
    vbox_connector = to_vbox_connector(connector);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
    /* The sysfs helpers were renamed in Linux 3.17. */
    drm_sysfs_connector_remove(connector);
#else
    drm_connector_unregister(connector);
#endif
    drm_connector_cleanup(connector);
    kfree(connector);
}
/** DRM .detect hook: report connection state from the host's mode hint. */
static enum drm_connector_status
vbox_connector_detect(struct drm_connector *connector, bool force)
{
    struct vbox_connector *vbox_connector;

    (void) force;
    LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
    vbox_connector = to_vbox_connector(connector);
    if (vbox_connector->mode_hint.disconnected)
        return connector_status_disconnected;
    return connector_status_connected;
}
/** DRM fill_modes hook: discard every previously probed mode so stale
 * host hints disappear, then re-probe via the single-connector helper.
 * @return the number of modes found, as reported by the helper. */
static int vbox_fill_modes(struct drm_connector *connector, uint32_t max_x, uint32_t max_y)
{
    struct vbox_connector *vbox_connector;
    struct drm_device *dev;
    struct drm_display_mode *mode, *iterator;
    LogFunc(("vboxvideo: %d: connector=%p, max_x=%lu, max_y = %lu\n", __LINE__,
             connector, (unsigned long)max_x, (unsigned long)max_y));
    vbox_connector = to_vbox_connector(connector);
    dev = vbox_connector->base.dev;
    /* _safe variant: entries are deleted while walking the list. */
    list_for_each_entry_safe(mode, iterator, &connector->modes, head)
    {
        list_del(&mode->head);
        drm_mode_destroy(dev, mode);
    }
    return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
}
/* Probe helpers: all modes valid, mode list from the host hint, and the
 * generic single-encoder lookup for best_encoder. */
static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
    .mode_valid = vbox_mode_valid,
    .get_modes = vbox_get_modes,
    .best_encoder = vbox_best_single_encoder,
};
/* Core connector operations; DPMS is delegated to the generic helper. */
static const struct drm_connector_funcs vbox_connector_funcs = {
    .dpms = drm_helper_connector_dpms,
    .detect = vbox_connector_detect,
    .fill_modes = vbox_fill_modes,
    .destroy = vbox_connector_destroy,
};
/** Allocate, initialise and register one VGA connector for @vbox_crtc,
 * then attach it to @encoder.
 * NOTE(review): the return values of drm_connector_init() and
 * drm_connector_register() are not checked -- confirm that ignoring
 * failures here is acceptable on the supported kernel range.
 * @return 0 on success, -ENOMEM if the connector cannot be allocated. */
static int vbox_connector_init(struct drm_device *dev,
                   struct vbox_crtc *vbox_crtc,
                   struct drm_encoder *encoder)
{
    struct vbox_connector *vbox_connector;
    struct drm_connector *connector;
    LogFunc(("vboxvideo: %d: dev=%p, encoder=%p\n", __LINE__, dev,
             encoder));
    vbox_connector = kzalloc(sizeof(struct vbox_connector), GFP_KERNEL);
    if (!vbox_connector)
        return -ENOMEM;
    connector = &vbox_connector->base;
    vbox_connector->vbox_crtc = vbox_crtc;
    drm_connector_init(dev, connector, &vbox_connector_funcs,
                       DRM_MODE_CONNECTOR_VGA);
    drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
    connector->interlace_allowed = 0;
    connector->doublescan_allowed = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)
    /* Suggested-offset properties exist from Linux 3.19 onwards. */
    drm_mode_create_suggested_offset_properties(dev);
    drm_object_attach_property(&connector->base,
                               dev->mode_config.suggested_x_property, 0);
    drm_object_attach_property(&connector->base,
                               dev->mode_config.suggested_y_property, 0);
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0)
    /* The sysfs helpers were renamed in Linux 3.17. */
    drm_sysfs_connector_add(connector);
#else
    drm_connector_register(connector);
#endif
    drm_mode_connector_attach_encoder(connector, encoder);
    LogFunc(("vboxvideo: %d: connector=%p\n", __LINE__, connector));
    return 0;
}
/** Create one CRTC, encoder and connector per screen (vbox->num_crtcs).
 * @param dev  the DRM device; dev->dev_private must be the driver's
 *             struct vbox_private.
 * @return 0 on success, negative errno on allocation/init failure. */
int vbox_mode_init(struct drm_device *dev)
{
    struct vbox_private *vbox = dev->dev_private;
    struct drm_encoder *encoder;
    struct vbox_crtc *vbox_crtc;
    unsigned i;
    int ret;
    /* vbox_cursor_init(dev); */
    LogFunc(("vboxvideo: %d: dev=%p\n", __LINE__, dev));
    for (i = 0; i < vbox->num_crtcs; ++i)
    {
        vbox_crtc = vbox_crtc_init(dev, i);
        if (!vbox_crtc)
            return -ENOMEM;
        encoder = vbox_encoder_init(dev, i);
        if (!encoder)
            return -ENOMEM;
        /* Previously ignored: vbox_connector_init() can fail with
         * -ENOMEM, so propagate the error instead of dropping it. */
        ret = vbox_connector_init(dev, vbox_crtc, encoder);
        if (ret)
            return ret;
    }
    return 0;
}
/** Tear-down counterpart of vbox_mode_init(); currently nothing to do,
 * as the cursor clean-up call is stubbed out. */
void vbox_mode_fini(struct drm_device *dev)
{
    /* vbox_cursor_fini(dev); */
}
/** Copy the ARGB image and generate the mask, which is needed in case the host
 * does not support ARGB cursors. The mask is a 1BPP bitmap with the bit set
 * if the corresponding alpha value in the ARGB image is greater than 0xF0. */
/* @param src       source pixel data, one 32-bit word per pixel
 * @param dst       destination: mask_size bytes of 1bpp mask followed by a
 *                  verbatim copy of the ARGB data
 * @param mask_size bytes reserved for the mask at the start of dst
 * NOTE(review): the whole-word compare "> 0xf0000000" also sets the bit
 * when alpha == 0xF0 and any lower (colour) bit is set, and the uint32_t
 * cast of a u8 buffer assumes suitable alignment/byte order -- confirm
 * both are intended. */
static void copy_cursor_image(u8 *src, u8 *dst, int width, int height,
                  size_t mask_size)
{
    unsigned i, j;
    /* One mask line is width bits, rounded up to whole bytes. */
    size_t line_size = (width + 7) / 8;
    memcpy(dst + mask_size, src, width * height * 4);
    for (i = 0; i < height; ++i)
        for (j = 0; j < width; ++j)
            if (((uint32_t *)src)[i * width + j] > 0xf0000000)
                dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
}
/** DRM cursor_set2 hook: set or hide the pointer shape for a CRTC.
 * A zero @handle hides the cursor (reported to the host only once no
 * CRTC shows one any more); otherwise the ARGB image in the GEM object
 * @handle is converted (mask + pixel data) and sent to the host.
 * @param hot_x/@param hot_y  hot-spot, clamped into the cursor rectangle
 * @return 0 on success, negative errno otherwise. */
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv,
               uint32_t handle, uint32_t width, uint32_t height,
               int32_t hot_x, int32_t hot_y)
{
    struct vbox_private *vbox = crtc->dev->dev_private;
    struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
    struct drm_gem_object *obj;
    struct vbox_bo *bo;
    int ret, rc;
    struct ttm_bo_kmap_obj uobj_map;
    u8 *src;
    u8 *dst = NULL;
    u32 caps = 0;
    size_t data_size, mask_size;
    bool src_isiomem;
    if (!handle) {
        bool cursor_enabled = false;
        struct drm_crtc *crtci;
        /* Hide cursor. */
        vbox_crtc->cursor_enabled = false;
        /* Only tell the host once every CRTC has hidden its cursor. */
        list_for_each_entry(crtci, &vbox->dev->mode_config.crtc_list, head)
            if (to_vbox_crtc(crtci)->cursor_enabled)
                cursor_enabled = true;
        if (!cursor_enabled)
            VBoxHGSMIUpdatePointerShape(&vbox->submit_info, 0, 0, 0, 0, 0, NULL, 0);
        return 0;
    }
    vbox_crtc->cursor_enabled = true;
    if (   width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT
        || width == 0 || height == 0)
        return -EINVAL;
    /* Refuse unless the host can draw hardware pointers and is in
     * absolute pointer mode. */
    rc = VBoxQueryConfHGSMI(&vbox->submit_info,
                            VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
    ret = -RTErrConvertToErrno(rc);
    if (ret)
        return ret;
    if (   caps & VMMDEV_MOUSE_HOST_CANNOT_HWPOINTER
        || !(caps & VMMDEV_MOUSE_HOST_WANTS_ABSOLUTE))
        return -EINVAL;
    obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
    if (obj)
    {
        bo = gem_to_vbox_bo(obj);
        ret = vbox_bo_reserve(bo, false);
        if (!ret)
        {
            /* The mask must be calculated based on the alpha channel, one bit
             * per ARGB word, and must be 32-bit padded. */
            mask_size = ((width + 7) / 8 * height + 3) & ~3;
            data_size = width * height * 4 + mask_size;
            /* Clamp the hot-spot into the cursor rectangle and cache the
             * shape parameters for vbox_cursor_move(). */
            vbox->cursor_hot_x = min((uint32_t)max(hot_x, 0), width);
            vbox->cursor_hot_y = min((uint32_t)max(hot_y, 0), height);
            vbox->cursor_width = width;
            vbox->cursor_height = height;
            vbox->cursor_data_size = data_size;
            dst = vbox->cursor_data;
            ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map);
            if (!ret)
            {
                src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem);
                if (!src_isiomem)
                {
                    uint32_t flags = VBOX_MOUSE_POINTER_VISIBLE
                                   | VBOX_MOUSE_POINTER_SHAPE
                                   | VBOX_MOUSE_POINTER_ALPHA;
                    copy_cursor_image(src, dst, width, height, mask_size);
                    rc = VBoxHGSMIUpdatePointerShape(&vbox->submit_info, flags,
                                                     vbox->cursor_hot_x,
                                                     vbox->cursor_hot_y,
                                                     width, height, dst,
                                                     data_size);
                    ret = -RTErrConvertToErrno(rc);
                }
                else
                    DRM_ERROR("src cursor bo should be in main memory\n");
                ttm_bo_kunmap(&uobj_map);
            }
            else
                /* Mapping failed: the cached shape data is invalid. */
                vbox->cursor_data_size = 0;
            vbox_bo_unreserve(bo);
        }
        drm_gem_object_unreference_unlocked(obj);
    }
    else
    {
        DRM_ERROR("Cannot find cursor object %x for crtc\n", handle);
        ret = -ENOENT;
    }
    return ret;
}
/** DRM cursor_move hook: report the pointer position to the host and,
 * if the host-visible hot-spot changed as a result of host clipping,
 * re-upload the cached shape with the adjusted hot-spot.
 * Does nothing when no shape has been cached yet. */
static int vbox_cursor_move(struct drm_crtc *crtc,
               int x, int y)
{
    struct vbox_private *vbox = crtc->dev->dev_private;
    uint32_t flags = VBOX_MOUSE_POINTER_VISIBLE
                   | VBOX_MOUSE_POINTER_SHAPE
                   | VBOX_MOUSE_POINTER_ALPHA;
    uint32_t host_x, host_y;
    uint32_t hot_x = 0;
    uint32_t hot_y = 0;
    int rc;
    /* We compare these to unsigned later and don't need to handle negative. */
    if (x + crtc->x < 0 || y + crtc->y < 0 || vbox->cursor_data_size == 0)
        return 0;
    rc = VBoxHGSMICursorPosition(&vbox->submit_info, true, x + crtc->x,
                                 y + crtc->y, &host_x, &host_y);
    if (RT_FAILURE(rc))
        return -RTErrConvertToErrno(rc);
    /* The host clipped the position: compensate through the hot-spot,
     * never past the cursor dimensions. */
    if (x + crtc->x < host_x)
        hot_x = min(host_x - x - crtc->x, vbox->cursor_width);
    if (y + crtc->y < host_y)
        hot_y = min(host_y - y - crtc->y, vbox->cursor_height);
    /* Unchanged hot-spot: nothing to re-send. */
    if (hot_x == vbox->cursor_hot_x && hot_y == vbox->cursor_hot_y)
        return 0;
    vbox->cursor_hot_x = hot_x;
    vbox->cursor_hot_y = hot_y;
    rc = VBoxHGSMIUpdatePointerShape(&vbox->submit_info, flags, hot_x, hot_y,
                                     vbox->cursor_width, vbox->cursor_height,
                                     vbox->cursor_data,
                                     vbox->cursor_data_size);
    return -RTErrConvertToErrno(rc);
}
/* $Id: vbox_ttm.c $ */
/** @file
* VirtualBox Additions Linux kernel video driver
*/
/*
* Copyright (C) 2013 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on
* ast_ttm.c
* with the following copyright and permission notice:
*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sub license, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
* DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
* OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
* USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial portions
* of the Software.
*
*/
/*
* Authors: Dave Airlie <airlied@redhat.com>
*/
#include "vbox_drv.h"
#include <ttm/ttm_page_alloc.h>
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
# define PLACEMENT_FLAGS(placement) (placement)
#else
# define PLACEMENT_FLAGS(placement) (placement).flags
#endif
/* Recover the driver-private structure embedding a TTM BO device. */
static inline struct vbox_private *
vbox_bdev(struct ttm_bo_device *bd)
{
    return container_of(bd, struct vbox_private, ttm.bdev);
}
/* Adapter: drm_global_reference init callback for the TTM memory object. */
static int
vbox_ttm_mem_global_init(struct drm_global_reference *ref)
{
    return ttm_mem_global_init(ref->object);
}
/* Adapter: drm_global_reference release callback for the TTM memory object. */
static void
vbox_ttm_mem_global_release(struct drm_global_reference *ref)
{
    ttm_mem_global_release(ref->object);
}
/**
 * Adds the vbox memory manager object/structures to the global memory manager.
 */
/* Takes a reference on the global TTM memory-accounting object and then
 * on the global BO object; if the second reference fails the first is
 * dropped again. Returns 0 or the drm_global_item_ref() error code. */
static int vbox_ttm_global_init(struct vbox_private *vbox)
{
    struct drm_global_reference *global_ref;
    int r;
    global_ref = &vbox->ttm.mem_global_ref;
    global_ref->global_type = DRM_GLOBAL_TTM_MEM;
    global_ref->size = sizeof(struct ttm_mem_global);
    global_ref->init = &vbox_ttm_mem_global_init;
    global_ref->release = &vbox_ttm_mem_global_release;
    r = drm_global_item_ref(global_ref);
    if (r != 0) {
        DRM_ERROR("Failed setting up TTM memory accounting "
                  "subsystem.\n");
        return r;
    }
    /* The BO global object depends on the memory global object. */
    vbox->ttm.bo_global_ref.mem_glob =
        vbox->ttm.mem_global_ref.object;
    global_ref = &vbox->ttm.bo_global_ref.ref;
    global_ref->global_type = DRM_GLOBAL_TTM_BO;
    global_ref->size = sizeof(struct ttm_bo_global);
    global_ref->init = &ttm_bo_global_init;
    global_ref->release = &ttm_bo_global_release;
    r = drm_global_item_ref(global_ref);
    if (r != 0) {
        DRM_ERROR("Failed setting up TTM BO subsystem.\n");
        drm_global_item_unref(&vbox->ttm.mem_global_ref);
        return r;
    }
    return 0;
}
/**
 * Removes the vbox memory manager object from the global memory manager.
 */
/* Idempotent: the release pointer doubles as the "initialised" flag. */
static void
vbox_ttm_global_release(struct vbox_private *vbox)
{
    if (vbox->ttm.mem_global_ref.release == NULL)
        return;
    drm_global_item_unref(&vbox->ttm.bo_global_ref.ref);
    drm_global_item_unref(&vbox->ttm.mem_global_ref);
    vbox->ttm.mem_global_ref.release = NULL;
}
/* Final TTM destructor: release the embedded GEM object and free the
 * containing vbox_bo. Also serves as this driver's BO identity marker
 * (see vbox_ttm_bo_is_vbox_bo). */
static void vbox_bo_ttm_destroy(struct ttm_buffer_object *tbo)
{
    struct vbox_bo *bo;
    bo = container_of(tbo, struct vbox_bo, bo);
    drm_gem_object_release(&bo->gem);
    kfree(bo);
}
/* A TTM buffer object belongs to this driver iff it uses our destructor. */
static bool vbox_ttm_bo_is_vbox_bo(struct ttm_buffer_object *bo)
{
    return bo->destroy == &vbox_bo_ttm_destroy;
}
/** TTM callback: describe the supported memory types.
 * System memory allows any caching; VRAM is a fixed, mappable region
 * handled by the range manager with write-combining as default. */
static int
vbox_bo_init_mem_type(struct ttm_bo_device *bdev, uint32_t type,
              struct ttm_mem_type_manager *man)
{
    switch (type) {
    case TTM_PL_SYSTEM:
        man->flags = TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_MASK_CACHING;
        man->default_caching = TTM_PL_FLAG_CACHED;
        break;
    case TTM_PL_VRAM:
        man->func = &ttm_bo_manager_func;
        man->flags = TTM_MEMTYPE_FLAG_FIXED |
                 TTM_MEMTYPE_FLAG_MAPPABLE;
        man->available_caching = TTM_PL_FLAG_UNCACHED |
                     TTM_PL_FLAG_WC;
        man->default_caching = TTM_PL_FLAG_WC;
        break;
    default:
        DRM_ERROR("Unsupported memory type %u\n", (unsigned)type);
        return -EINVAL;
    }
    return 0;
}
/* TTM callback: when a buffer must be evicted, send it to system memory.
 * Foreign (non-vbox) buffer objects are left untouched. */
static void
vbox_bo_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *pl)
{
    struct vbox_bo *vboxbo;

    if (!vbox_ttm_bo_is_vbox_bo(bo))
        return;
    vboxbo = vbox_bo(bo);
    vbox_ttm_placement(vboxbo, TTM_PL_FLAG_SYSTEM);
    *pl = vboxbo->placement;
}
/* TTM callback: no per-file access restrictions on buffer objects. */
static int vbox_bo_verify_access(struct ttm_buffer_object *bo, struct file *filp)
{
    return 0;
}
/** TTM callback: fill in the bus-mapping information for @mem.
 * System memory needs no bus mapping; VRAM is mapped from the first
 * PCI BAR of the graphics device.
 * @return 0 on success, -EINVAL for unmappable or unknown types. */
static int vbox_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                   struct ttm_mem_reg *mem)
{
    struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
    struct vbox_private *vbox = vbox_bdev(bdev);
    mem->bus.addr = NULL;
    mem->bus.offset = 0;
    mem->bus.size = mem->num_pages << PAGE_SHIFT;
    mem->bus.base = 0;
    mem->bus.is_iomem = false;
    if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
        return -EINVAL;
    switch (mem->mem_type) {
    case TTM_PL_SYSTEM:
        /* system memory */
        return 0;
    case TTM_PL_VRAM:
        mem->bus.offset = mem->start << PAGE_SHIFT;
        mem->bus.base = pci_resource_start(vbox->dev->pdev, 0);
        mem->bus.is_iomem = true;
        break;
    default:
        /* Dropped an unreachable "break" that followed this return. */
        return -EINVAL;
    }
    return 0;
}
/* TTM callback: nothing was reserved in io_mem_reserve, nothing to free. */
static void vbox_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
}
/* TTM callback: move a buffer object between memory domains by plain
 * memcpy; there is no DMA engine. The interruptible flag is unused. */
static int vbox_bo_move(struct ttm_buffer_object *bo,
           bool evict, bool interruptible,
           bool no_wait_gpu,
           struct ttm_mem_reg *new_mem)
{
    (void) interruptible;
    return ttm_bo_move_memcpy(bo, evict, no_wait_gpu, new_mem);
}
/* Backend destructor for TT structures allocated in vbox_ttm_tt_create(). */
static void vbox_ttm_backend_destroy(struct ttm_tt *tt)
{
    ttm_tt_fini(tt);
    kfree(tt);
}
/* TT backend function table; only destruction needs driver involvement. */
static struct ttm_backend_func vbox_tt_backend_func = {
    .destroy = &vbox_ttm_backend_destroy,
};
/* TTM callback: allocate and initialise a TT structure wired to our
 * backend; returns NULL on allocation or init failure. */
static struct ttm_tt *vbox_ttm_tt_create(struct ttm_bo_device *bdev,
                     unsigned long size, uint32_t page_flags,
                     struct page *dummy_read_page)
{
    struct ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

    if (!tt)
        return NULL;
    tt->func = &vbox_tt_backend_func;
    if (ttm_tt_init(tt, bdev, size, page_flags, dummy_read_page) != 0) {
        kfree(tt);
        tt = NULL;
    }
    return tt;
}
/* Delegate TT page population to the common TTM page-pool allocator. */
static int vbox_ttm_tt_populate(struct ttm_tt *ttm)
{
    return ttm_pool_populate(ttm);
}
/* Return TT pages to the common TTM page pool. */
static void vbox_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
    ttm_pool_unpopulate(ttm);
}
/* TTM driver callbacks wiring the helpers above into the BO device. */
struct ttm_bo_driver vbox_bo_driver = {
    .ttm_tt_create = vbox_ttm_tt_create,
    .ttm_tt_populate = vbox_ttm_tt_populate,
    .ttm_tt_unpopulate = vbox_ttm_tt_unpopulate,
    .init_mem_type = vbox_bo_init_mem_type,
    .evict_flags = vbox_bo_evict_flags,
    .move = vbox_bo_move,
    .verify_access = vbox_bo_verify_access,
    .io_mem_reserve = &vbox_ttm_io_mem_reserve,
    .io_mem_free = &vbox_ttm_io_mem_free,
};
/** Initialise the TTM memory manager: global references, the BO device,
 * a VRAM pool sized from vbox->vram_size, and write-combining for the
 * framebuffer BAR.
 * Previously the global references (and the BO device) were leaked when
 * a later step failed; the error paths now unwind what was set up.
 * @return 0 on success, negative errno otherwise. */
int vbox_mm_init(struct vbox_private *vbox)
{
    int ret;
    struct drm_device *dev = vbox->dev;
    struct ttm_bo_device *bdev = &vbox->ttm.bdev;
    ret = vbox_ttm_global_init(vbox);
    if (ret)
        return ret;
    ret = ttm_bo_device_init(&vbox->ttm.bdev,
                 vbox->ttm.bo_global_ref.ref.object,
                 &vbox_bo_driver,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0)
                 dev->anon_inode->i_mapping,
#endif
                 DRM_FILE_PAGE_OFFSET,
                 true);
    if (ret) {
        DRM_ERROR("Error initialising bo driver; %d\n", ret);
        goto err_ttm_global_release;
    }
    ret = ttm_bo_init_mm(bdev, TTM_PL_VRAM,
                 vbox->vram_size >> PAGE_SHIFT);
    if (ret) {
        DRM_ERROR("Failed ttm VRAM init: %d\n", ret);
        goto err_device_release;
    }
#ifdef DRM_MTRR_WC
    /* Older kernels manage MTRRs through the DRM helper. */
    vbox->fb_mtrr = drm_mtrr_add(pci_resource_start(dev->pdev, 0),
                 pci_resource_len(dev->pdev, 0),
                 DRM_MTRR_WC);
#else
    vbox->fb_mtrr = arch_phys_wc_add(pci_resource_start(dev->pdev, 0),
                     pci_resource_len(dev->pdev, 0));
#endif
    vbox->ttm.mm_initialised = true;
    return 0;

err_device_release:
    ttm_bo_device_release(&vbox->ttm.bdev);
err_ttm_global_release:
    vbox_ttm_global_release(vbox);
    return ret;
}
/** Undo vbox_mm_init(): release the BO device, the global references and
 * the write-combining setup. A no-op if init never completed. */
void vbox_mm_fini(struct vbox_private *vbox)
{
#ifdef DRM_MTRR_WC
    struct drm_device *dev = vbox->dev;
#endif
    if (!vbox->ttm.mm_initialised)
        return;
    ttm_bo_device_release(&vbox->ttm.bdev);
    vbox_ttm_global_release(vbox);
#ifdef DRM_MTRR_WC
    /* Older kernels manage MTRRs through the DRM helper. */
    drm_mtrr_del(vbox->fb_mtrr,
                 pci_resource_start(dev->pdev, 0),
                 pci_resource_len(dev->pdev, 0), DRM_MTRR_WC);
#else
    arch_phys_wc_del(vbox->fb_mtrr);
#endif
}
/** Fill in bo->placement for the memory domains requested in @domain
 * (TTM_PL_FLAG_VRAM and/or TTM_PL_FLAG_SYSTEM); system memory is the
 * fallback when no known domain bit is set. The fpfn/lpfn range limits
 * moved from ttm_placement into each entry in Linux 3.18, hence the
 * version-dependent zeroing. */
void vbox_ttm_placement(struct vbox_bo *bo, int domain)
{
    u32 c = 0;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
    bo->placement.fpfn = 0;
    bo->placement.lpfn = 0;
#else
    unsigned i;
#endif
    bo->placement.placement = bo->placements;
    bo->placement.busy_placement = bo->placements;
    if (domain & TTM_PL_FLAG_VRAM)
        PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED | TTM_PL_FLAG_VRAM;
    if (domain & TTM_PL_FLAG_SYSTEM)
        PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    if (!c)
        PLACEMENT_FLAGS(bo->placements[c++]) = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM;
    bo->placement.num_placement = c;
    bo->placement.num_busy_placement = c;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
    for (i = 0; i < c; ++i) {
        bo->placements[i].fpfn = 0;
        bo->placements[i].lpfn = 0;
    }
#endif
}
/** Allocate and initialise a vbox buffer object of @size bytes that may
 * be placed in VRAM or system memory. On success *pvboxbo receives the
 * new object.
 * NOTE(review): there is no explicit kfree on the ttm_bo_init() failure
 * path -- presumably TTM calls vbox_bo_ttm_destroy() itself when init
 * fails; confirm for the supported kernel range.
 * @return 0 on success, negative errno otherwise. */
int vbox_bo_create(struct drm_device *dev, int size, int align,
           uint32_t flags, struct vbox_bo **pvboxbo)
{
    struct vbox_private *vbox = dev->dev_private;
    struct vbox_bo *vboxbo;
    size_t acc_size;
    int ret;
    vboxbo = kzalloc(sizeof(struct vbox_bo), GFP_KERNEL);
    if (!vboxbo)
        return -ENOMEM;
    ret = drm_gem_object_init(dev, &vboxbo->gem, size);
    if (ret) {
        kfree(vboxbo);
        return ret;
    }
    vboxbo->bo.bdev = &vbox->ttm.bdev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 15, 0)
    vboxbo->bo.bdev->dev_mapping = dev->dev_mapping;
#endif
    vbox_ttm_placement(vboxbo, TTM_PL_FLAG_VRAM | TTM_PL_FLAG_SYSTEM);
    acc_size = ttm_bo_dma_acc_size(&vbox->ttm.bdev, size,
                                   sizeof(struct vbox_bo));
    ret = ttm_bo_init(&vbox->ttm.bdev, &vboxbo->bo, size,
                      ttm_bo_type_device, &vboxbo->placement,
                      align >> PAGE_SHIFT, false, NULL, acc_size,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
                      NULL,
#endif
                      NULL, vbox_bo_ttm_destroy);
    if (ret)
        return ret;
    *pvboxbo = vboxbo;
    return 0;
}
/* Return the buffer's current offset within its TTM memory domain. */
static inline u64 vbox_bo_gpu_offset(struct vbox_bo *bo)
{
    return bo->bo.offset;
}
/* Pin @bo into the domain given by @pl_flag. A reference-counted pin:
 * only the first pin actually validates the placement with NO_EVICT.
 * On success, when @gpu_addr is non-NULL it receives the bo offset. */
int vbox_bo_pin(struct vbox_bo *bo, u32 pl_flag, u64 *gpu_addr)
{
    unsigned idx;
    int rc;

    if (bo->pin_count == 0) {
        vbox_ttm_placement(bo, pl_flag);
        for (idx = 0; idx < bo->placement.num_placement; idx++)
            PLACEMENT_FLAGS(bo->placements[idx]) |= TTM_PL_FLAG_NO_EVICT;
        rc = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
        if (rc)
            return rc;
    }
    bo->pin_count++;
    if (gpu_addr)
        *gpu_addr = vbox_bo_gpu_offset(bo);
    return 0;
}
/* Drop one pin reference on @bo; when the last one goes away, clear the
 * NO_EVICT flags and re-validate so TTM may evict the buffer again. */
int vbox_bo_unpin(struct vbox_bo *bo)
{
    unsigned idx;

    if (!bo->pin_count) {
        DRM_ERROR("unpin bad %p\n", bo);
        return 0;
    }
    if (--bo->pin_count > 0)
        return 0;
    for (idx = 0; idx < bo->placement.num_placement; idx++)
        PLACEMENT_FLAGS(bo->placements[idx]) &= ~TTM_PL_FLAG_NO_EVICT;
    return ttm_bo_validate(&bo->bo, &bo->placement, false, false);
}
/* Move a vbox-owned buffer object to system memory if no one else has it
 * pinned. The caller must have pinned it previously, and this call will
 * release the caller's pin. */
/* Returns 0 when the object was not pinned at all (after logging) or is
 * still pinned by others, otherwise the ttm_bo_validate() result. */
int vbox_bo_push_sysram(struct vbox_bo *bo)
{
    int i, ret;
    if (!bo->pin_count) {
        /* Message fixed: was a copy-paste of the vbox_bo_unpin() text. */
        DRM_ERROR("push_sysram on unpinned bo %p\n", bo);
        return 0;
    }
    bo->pin_count--;
    if (bo->pin_count)
        return 0;
    /* Drop any CPU mapping before the object moves. */
    if (bo->kmap.virtual)
        ttm_bo_kunmap(&bo->kmap);
    vbox_ttm_placement(bo, TTM_PL_FLAG_SYSTEM);
    for (i = 0; i < bo->placement.num_placement ; i++)
        PLACEMENT_FLAGS(bo->placements[i]) |= TTM_PL_FLAG_NO_EVICT;
    ret = ttm_bo_validate(&bo->bo, &bo->placement, false, false);
    if (ret) {
        /* Message fixed: the move is to system memory, not to VRAM. */
        DRM_ERROR("pushing to system memory failed\n");
        return ret;
    }
    return 0;
}
/* File-operations mmap handler: reject offsets below the DRM file page
 * offset window, then delegate the mapping to TTM. */
int vbox_mmap(struct file *filp, struct vm_area_struct *vma)
{
    struct drm_file *file_priv = filp->private_data;
    struct vbox_private *vbox;

    if (unlikely(vma->vm_pgoff < DRM_FILE_PAGE_OFFSET))
        return -EINVAL;
    vbox = file_priv->minor->dev->dev_private;
    return ttm_bo_mmap(filp, vma, &vbox->ttm.bdev);
}
/* $Id: vboxvideo_drm.c $ */
/** @file
* VirtualBox Additions Linux kernel driver, DRM support
*/
/*
* Copyright (C) 2006-2012 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on:
*
* tdfx_drv.c -- tdfx driver -*- linux-c -*-
* Created: Thu Oct 7 10:38:32 1999 by faith@precisioninsight.com
*
* Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*
* Authors:
* Rickard E. (Rik) Faith <faith@valinux.com>
* Daryll Strauss <daryll@valinux.com>
* Gareth Hughes <gareth@valinux.com>
*/
#include "version-generated.h"
#include <linux/module.h>
#include <linux/version.h>
#include <drm/drmP.h>
#include "vboxvideo_drm.h"
/* This definition and the file-operations-as-pointer change were both added in
* kernel 3.3. All back-ports of the structure change to distribution kernels
* that I have checked also back-ported the definition at the same time. */
#ifdef DRM_IOCTL_MODE_ADDFB2
# define DRM_FOPS_AS_POINTER
#endif
/* The first of these was introduced when drm was generalised to work with
* non-PCI buses, but was removed between 3.15 and 3.16. The second is a
* random definition introduced in the mean-time. */
#if defined(DRIVER_BUS_PCI) || defined(DRIVER_PRIME)
# define DRM_NEW_BUS_INIT 1
#endif
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 18, 0)
# ifdef RHEL_RELEASE_CODE
# if RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7, 2)
# define DRM_HAVE_DRM_MAP
# endif
# else
# define DRM_HAVE_DRM_MAP
# endif
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0)
# define DRM_WANTS_SET_BUSID
#else
# ifdef RHEL_RELEASE_CODE
# if RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7, 2)
# define DRM_WANTS_SET_BUSID
# endif
# endif
#endif
/* PCI IDs this driver binds to (the VirtualBox graphics adapter). */
static struct pci_device_id pciidlist[] = {
    vboxvideo_PCI_IDS
};
MODULE_DEVICE_TABLE(pci, pciidlist);
/** DRM .load hook: nothing to set up for this stub driver. */
int vboxvideo_driver_load(struct drm_device * dev, unsigned long flags)
{
    return 0;
}
#ifdef DRM_FOPS_AS_POINTER
/* since linux-3.3.0-rc1 drm_driver::fops is pointer */
/* File operations, all forwarded straight to the DRM core helpers. */
static struct file_operations driver_fops =
{
    .owner = THIS_MODULE,
    .open = drm_open,
    .release = drm_release,
    .unlocked_ioctl = drm_ioctl,
# ifdef DRM_HAVE_DRM_MAP
    /* This shouldn't be necessary even for old kernels as there is
     * nothing sensible to mmap. But we play safe and keep it for
     * legacy reasons. */
    .mmap = drm_mmap,
# endif
    .poll = drm_poll,
};
#endif
/* Main DRM driver description; version-dependent members are selected
 * by the feature macros computed earlier in this file. */
static struct drm_driver driver =
{
    /* .driver_features = DRIVER_USE_MTRR, */
    .load = vboxvideo_driver_load,
#ifdef DRM_WANTS_SET_BUSID
    /* If this is missing a warning gets printed to dmesg. We will not
     * attempt to make kernels work to which the change (915b4d11b) got back-
     * ported, as the problem is only cosmetic. */
    .set_busid = drm_pci_set_busid,
#endif
# ifndef DRM_FOPS_AS_POINTER
    /* Pre-3.3 kernels embed the file operations by value. */
    .fops =
    {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        /* This was changed with Linux 2.6.33 but Fedora backported this
         * change to their 2.6.32 kernel. */
#if defined(DRM_UNLOCKED)
        .unlocked_ioctl = drm_ioctl,
#else
        .ioctl = drm_ioctl,
#endif
        .mmap = drm_mmap,
        .poll = drm_poll,
    },
#else /* defined(DRM_FOPS_AS_POINTER) */
    .fops = &driver_fops,
#endif
#ifndef DRM_NEW_BUS_INIT
    /* Pre-bus-split kernels embed the PCI driver here. */
    .pci_driver =
    {
        .name = DRIVER_NAME,
        .id_table = pciidlist,
    },
#endif
    .name = DRIVER_NAME,
    .desc = DRIVER_DESC,
    .date = DRIVER_DATE,
    .major = DRIVER_MAJOR,
    .minor = DRIVER_MINOR,
    .patchlevel = DRIVER_PATCHLEVEL,
};
#ifdef DRM_NEW_BUS_INIT
/* Separate PCI driver structure required by the post-bus-split init API. */
static struct pci_driver pci_driver =
{
    .name = DRIVER_NAME,
    .id_table = pciidlist,
};
#endif
/* Module entry point: register the driver via whichever DRM init API
 * the target kernel provides. */
static int __init vboxvideo_init(void)
{
#ifndef DRM_NEW_BUS_INIT
    return drm_init(&driver);
#else
    return drm_pci_init(&driver, &pci_driver);
#endif
}
/* Module exit point: unregister with the matching DRM teardown API. */
static void __exit vboxvideo_exit(void)
{
#ifndef DRM_NEW_BUS_INIT
    drm_exit(&driver);
#else
    drm_pci_exit(&driver, &pci_driver);
#endif
}
module_init(vboxvideo_init);
module_exit(vboxvideo_exit);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);
#ifdef MODULE_VERSION
MODULE_VERSION(VBOX_VERSION_STRING);
#endif
MODULE_LICENSE("GPL and additional rights");
/* $Id: vboxvideo_drm.h $ */
/** @file
* VirtualBox Additions Linux kernel driver, DRM support
*/
/*
* Copyright (C) 2006-2010 Oracle Corporation
*
* This file is part of VirtualBox Open Source Edition (OSE), as
* available from http://www.virtualbox.org. This file is free software;
* you can redistribute it and/or modify it under the terms of the GNU
* General Public License (GPL) as published by the Free Software
* Foundation, in version 2 as it comes in the "COPYING" file of the
* VirtualBox OSE distribution. VirtualBox OSE is distributed in the
* hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
* --------------------------------------------------------------------
*
* This code is based on:
*
* tdfx.h -- 3dfx DRM template customization -*- linux-c -*-
* Created: Wed Feb 14 12:32:32 2001 by gareth@valinux.com
*
* Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors:
* Gareth Hughes <gareth@valinux.com>
*/
#ifndef __VBOXVIDEO_H__
#define __VBOXVIDEO_H__
/* General customization:
 */
#include "product-generated.h"
/* Identification strings exported through the DRM driver structure. */
#define DRIVER_AUTHOR VBOX_VENDOR
#define DRIVER_NAME "vboxvideo"
#define DRIVER_DESC VBOX_PRODUCT " Graphics Card"
#define DRIVER_DATE "20090303"
/* Driver version numbers reported to user space. */
#define DRIVER_MAJOR 1
#define DRIVER_MINOR 0
#define DRIVER_PATCHLEVEL 0
/* PCI match entry for the VirtualBox graphics adapter, 0x80ee:0xbeef. */
#define vboxvideo_PCI_IDS \
    {0x80ee, 0xbeef, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \
    {0, 0, 0}
#endif
......@@ -3,9 +3,9 @@
#define VBOX_VERSION_MAJOR 5
#define VBOX_VERSION_MINOR 0
#define VBOX_VERSION_BUILD 16
#define VBOX_VERSION_STRING_RAW "5.0.16"
#define VBOX_VERSION_STRING "5.0.16_Ubuntu"
#define VBOX_VERSION_BUILD 18
#define VBOX_VERSION_STRING_RAW "5.0.18"
#define VBOX_VERSION_STRING "5.0.18_Ubuntu"
#define VBOX_API_VERSION_STRING "5_0"
#define VBOX_PRIVATE_BUILD_DESC "Private build by root"
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment