Tryag File Manager
Home
-
Turbo Force
Current Path :
/
proc
/
self
/
root
/
usr
/
lib
/
vmware-tools
/
modules
/
source
/
Upload File :
New :
File
Dir
//proc/self/root/usr/lib/vmware-tools/modules/source/vmhgfs.tar
vmhgfs-only/ 0000755 0000000 0000000 00000000000 13432726375 012041 5 ustar root root vmhgfs-only/request.c 0000444 0000000 0000000 00000017336 13432725306 013676 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * request.c -- * * Functions dealing with the creation, deletion, and sending of HGFS * requests are defined here. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <asm/atomic.h> #include <linux/list.h> #include <linux/signal.h> #include "compat_kernel.h" #include "compat_sched.h" #include "compat_semaphore.h" #include "compat_slab.h" #include "compat_spinlock.h" #include "module.h" #include "request.h" #include "transport.h" #include "fsutil.h" #include "vm_assert.h" /* *---------------------------------------------------------------------- * * HgfsRequestInit -- * * Initializes new request structure. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsRequestInit(HgfsReq *req, // IN: request to initialize int requestId) // IN: ID assigned to the request { ASSERT(req); kref_init(&req->kref); INIT_LIST_HEAD(&req->list); init_waitqueue_head(&req->queue); req->id = requestId; req->payloadSize = 0; req->state = HGFS_REQ_STATE_ALLOCATED; req->numEntries = 0; } /* *---------------------------------------------------------------------- * * HgfsGetNewRequest -- * * Allocates and initializes new request structure. * * Results: * On success the new struct is returned with all fields * initialized. Returns NULL on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsReq * HgfsGetNewRequest(void) { static atomic_t hgfsIdCounter = ATOMIC_INIT(0); HgfsReq *req; req = HgfsTransportAllocateRequest(HGFS_PACKET_MAX); if (req == NULL) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: can't allocate memory\n", __func__)); return NULL; } HgfsRequestInit(req, atomic_inc_return(&hgfsIdCounter) - 1); return req; } /* *---------------------------------------------------------------------- * * HgfsCopyRequest -- * * Allocates and initializes new request structure and copies * existing request into it. * * Results: * On success the new struct is returned with all fields * initialized. Returns NULL on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ HgfsReq * HgfsCopyRequest(HgfsReq *req) // IN: request to be copied { HgfsReq *newReq; ASSERT(req); newReq = HgfsTransportAllocateRequest(req->bufferSize); if (newReq == NULL) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: can't allocate memory\n", __func__)); return NULL; } HgfsRequestInit(newReq, req->id); memcpy(newReq->dataPacket, req->dataPacket, req->numEntries * sizeof (req->dataPacket[0])); newReq->numEntries = req->numEntries; newReq->payloadSize = req->payloadSize; memcpy(newReq->payload, req->payload, req->payloadSize); return newReq; } /* *---------------------------------------------------------------------- * * HgfsSendRequest -- * * Send out an HGFS request via transport layer, and wait for the reply. * * Results: * Returns zero on success, negative number on error. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsSendRequest(HgfsReq *req) // IN/OUT: Outgoing request { int ret; ASSERT(req); ASSERT(req->payloadSize <= req->bufferSize); req->state = HGFS_REQ_STATE_UNSENT; LOG(10, (KERN_DEBUG "VMware hgfs: HgfsSendRequest: Sending request id %d\n", req->id)); ret = HgfsTransportSendRequest(req); LOG(10, (KERN_DEBUG "VMware hgfs: HgfsSendRequest: request finished, " "return %d\n", ret)); return ret; } /* *---------------------------------------------------------------------- * * HgfsRequestFreeMemory -- * * Frees memory allocated for a request. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsRequestFreeMemory(struct kref *kref) { HgfsReq *req = container_of(kref, HgfsReq, kref); LOG(10, (KERN_DEBUG "VMware hgfs: %s: freeing request %d\n", __func__, req->id)); HgfsTransportFreeRequest(req); } /* *---------------------------------------------------------------------- * * HgfsRequestPutRef -- * * Decrease reference count of HGFS request. * * Results: * None * * Side effects: * May cause request to be destroyed. * *---------------------------------------------------------------------- */ void HgfsRequestPutRef(HgfsReq *req) // IN: Request { if (req) { LOG(10, (KERN_DEBUG "VMware hgfs: %s: request %d\n", __func__, req->id)); kref_put(&req->kref, HgfsRequestFreeMemory); } } /* *---------------------------------------------------------------------- * * HgfsRequestGetRef -- * * Increment reference count of HGFS request. * * Results: * Pointer to the same HGFS request. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsReq * HgfsRequestGetRef(HgfsReq *req) // IN: Request { if (req) { LOG(10, (KERN_DEBUG "VMware hgfs: %s: request %d\n", __func__, req->id)); kref_get(&req->kref); } return req; } /* *---------------------------------------------------------------------- * * HgfsReplyStatus -- * * Return reply status. * * Results: * Returns reply status as per the protocol. * XXX: Needs changes when vmci headers are added. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsStatus HgfsReplyStatus(HgfsReq *req) // IN { HgfsReply *rep; rep = (HgfsReply *)(HGFS_REQ_PAYLOAD(req)); return rep->status; } /* *---------------------------------------------------------------------- * * HgfsCompleteReq -- * * Marks request as completed and wakes up sender. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsCompleteReq(HgfsReq *req) // IN: Request { ASSERT(req); req->state = HGFS_REQ_STATE_COMPLETED; /* Wake up the client process waiting for the reply to this request. */ wake_up(&req->queue); } /* *---------------------------------------------------------------------- * * HgfsFailReq -- * * Marks request as failed and calls HgfsCompleteReq to wake up * sender. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsFailReq(HgfsReq *req, // IN: erequest to be marked failed int error) // IN: error code { HgfsReply *reply = req->payload; reply->id = req->id; reply->status = error; req->payloadSize = sizeof *reply; HgfsCompleteReq(req); } vmhgfs-only/module.c 0000644 0000000 0000000 00000005605 13432725306 013471 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * module.c -- * * Module-specific components of the vmhgfs driver. */ /* Must come before any kernel header file. 
*/ #include "driver-config.h" #include <linux/errno.h> #include "compat_module.h" #include "filesystem.h" #include "module.h" #include "vmhgfs_version.h" #ifdef VMX86_DEVEL /* * Logging is available only in devel build. */ int LOGLEVEL_THRESHOLD = 4; module_param(LOGLEVEL_THRESHOLD, int, 0444); MODULE_PARM_DESC(LOGLEVEL_THRESHOLD, "Set verbosity (0 means no log, 10 means very verbose, 4 is default)"); #endif /* Module information. */ MODULE_AUTHOR("VMware, Inc."); MODULE_DESCRIPTION("VMware Host/Guest File System"); MODULE_VERSION(VMHGFS_DRIVER_VERSION_STRING); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("vmware_vmhgfs"); /* * Starting with SLE10sp2, Novell requires that IHVs sign a support agreement * with them and mark their kernel modules as externally supported via a * change to the module header. If this isn't done, the module will not load * by default (i.e., neither mkinitrd nor modprobe will accept it). */ MODULE_INFO(supported, "external"); /* *---------------------------------------------------------------------- * * init_module -- * * linux module entry point. Called by /sbin/insmod command. * Sets up internal state and registers the hgfs filesystem * with the kernel. * * Results: * Returns 0 on success, an error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ int init_module(void) { return HgfsInitFileSystem() ? 0 : -EBUSY; } /* *---------------------------------------------------------------------- * * cleanup_module -- * * Called by /sbin/rmmod. Unregisters filesystem with kernel, * cleans up internal state, and unloads module. * * Note: for true kernel 2.4 compliance, this should be * "module_exit". 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void cleanup_module(void) { HgfsCleanupFileSystem(); } vmhgfs-only/Makefile 0000644 0000000 0000000 00000007363 13432725347 013510 0 ustar root root #!/usr/bin/make -f ########################################################## # Copyright (C) 1998-2016 VMware, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation version 2 and no later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ########################################################## #### #### VMware kernel module Makefile to be distributed externally #### #### #### SRCROOT _must_ be a relative path. #### SRCROOT = . # # open-vm-tools doesn't replicate shared source files for different modules; # instead, files are kept in shared locations. So define a few useful macros # to be able to handle both cases cleanly. # INCLUDE := ifdef OVT_SOURCE_DIR AUTOCONF_DIR := $(OVT_SOURCE_DIR)/modules/linux/shared/autoconf VMLIB_PATH = $(OVT_SOURCE_DIR)/lib/$(1) INCLUDE += -I$(OVT_SOURCE_DIR)/modules/linux/shared INCLUDE += -I$(OVT_SOURCE_DIR)/lib/include else AUTOCONF_DIR := $(SRCROOT)/shared/autoconf INCLUDE += -I$(SRCROOT)/shared endif VM_UNAME = $(shell uname -r) # Header directory for the running kernel ifdef LINUXINCLUDE HEADER_DIR = $(LINUXINCLUDE) else HEADER_DIR = /lib/modules/$(VM_UNAME)/build/include endif BUILD_DIR = $(HEADER_DIR)/.. 
DRIVER := vmhgfs PRODUCT := tools # Grep program GREP = /bin/grep vm_check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null \ > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi) vm_check_file = $(shell if test -f $(1); then echo "yes"; else echo "no"; fi) ifndef VM_KBUILD VM_KBUILD := no ifeq ($(call vm_check_file,$(BUILD_DIR)/Makefile), yes) VM_KBUILD := yes endif export VM_KBUILD endif ifndef VM_KBUILD_SHOWN ifeq ($(VM_KBUILD), no) VM_DUMMY := $(shell echo >&2 "Using standalone build system.") else VM_DUMMY := $(shell echo >&2 "Using kernel build system.") endif VM_KBUILD_SHOWN := yes export VM_KBUILD_SHOWN endif ifneq ($(VM_KBUILD), no) # If there is no version defined, we are in toplevel pass, not yet in kernel makefiles... ifeq ($(VERSION),) DRIVER_KO := $(DRIVER).ko .PHONY: $(DRIVER_KO) auto-build: $(DRIVER_KO) cp -f $< $(SRCROOT)/../$(DRIVER).o # $(DRIVER_KO) is a phony target, so compare file times explicitly $(DRIVER): $(DRIVER_KO) if [ $< -nt $@ ] || [ ! -e $@ ] ; then cp -f $< $@; fi # # Define a setup target that gets built before the actual driver. 
# This target may not be used at all, but if it is then it will be defined # in Makefile.kernel # prebuild:: ; postbuild:: ; $(DRIVER_KO): prebuild $(MAKE) -C $(BUILD_DIR) SUBDIRS=$$PWD SRCROOT=$$PWD/$(SRCROOT) \ MODULEBUILDDIR=$(MODULEBUILDDIR) modules $(MAKE) -C $$PWD SRCROOT=$$PWD/$(SRCROOT) \ MODULEBUILDDIR=$(MODULEBUILDDIR) postbuild endif vm_check_build = $(shell if $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) \ $(CPPFLAGS) $(CFLAGS) $(CFLAGS_KERNEL) $(LINUXINCLUDE) \ $(EXTRA_CFLAGS) -Iinclude2/asm/mach-default \ -DKBUILD_BASENAME=\"$(DRIVER)\" \ -Werror -S -o /dev/null -xc $(1) \ > /dev/null 2>&1; then echo "$(2)"; else echo "$(3)"; fi) CC_WARNINGS := -Wall -Wstrict-prototypes CC_OPTS := $(GLOBAL_DEFS) $(CC_WARNINGS) -DVMW_USING_KBUILD ifdef VMX86_DEVEL CC_OPTS += -DVMX86_DEVEL endif ifdef VMX86_DEBUG CC_OPTS += -DVMX86_DEBUG endif include $(SRCROOT)/Makefile.kernel else include $(SRCROOT)/Makefile.normal endif #.SILENT: vmhgfs-only/hgfsUtil.h 0000444 0000000 0000000 00000013401 13432725346 013771 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsUtil.h -- * * Utility functions and macros used by hgfs. 
*/ #ifndef _HGFSUTIL_H_ # define _HGFSUTIL_H_ # if defined __linux__ && defined __KERNEL__ # include "driver-config.h" # include <linux/time.h> // for time_t and timespec /* Include time.h in userspace code, but not in Solaris kernel code. */ # elif defined __FreeBSD__ && defined _KERNEL /* Do nothing. */ # elif defined __APPLE__ && defined KERNEL # include <sys/time.h> # else # include <time.h> # endif # include "vm_basic_types.h" # if !defined _STRUCT_TIMESPEC && \ !defined _TIMESPEC_DECLARED && \ !defined __timespec_defined && \ !defined sun && \ !defined __FreeBSD__ && \ !__APPLE__ && \ !defined _WIN32 struct timespec { time_t tv_sec; long tv_nsec; }; # endif # include "hgfs.h" /* Cross-platform representation of a platform-specific error code. */ #ifndef _WIN32 # if defined __KERNEL__ || defined _KERNEL || defined KERNEL # if defined __linux__ # include <linux/errno.h> # elif defined sun || defined __FreeBSD__ || defined __APPLE__ # include <sys/errno.h> # endif # else # include <errno.h> # endif typedef int HgfsInternalStatus; /* * There is no internal error in Linux. * Define a const that is converted to HGFS_INTERNAL_STATUS_ERROR. 
*/ # define EINTERNAL 1001 #else # include <windows.h> typedef DWORD HgfsInternalStatus; #endif #if defined _WIN32 #define HGFS_ERROR_SUCCESS ERROR_SUCCESS #define HGFS_ERROR_IO ERROR_IO_DEVICE #define HGFS_ERROR_ACCESS_DENIED ERROR_ACCESS_DENIED #define HGFS_ERROR_INVALID_PARAMETER ERROR_INVALID_PARAMETER #define HGFS_ERROR_INVALID_HANDLE ERROR_INVALID_HANDLE #define HGFS_ERROR_PROTOCOL RPC_S_PROTOCOL_ERROR #define HGFS_ERROR_STALE_SESSION ERROR_CONNECTION_INVALID #define HGFS_ERROR_BUSY ERROR_RETRY #define HGFS_ERROR_PATH_BUSY ERROR_RETRY #define HGFS_ERROR_FILE_NOT_FOUND ERROR_FILE_NOT_FOUND #define HGFS_ERROR_FILE_EXIST ERROR_ALREADY_EXISTS #define HGFS_ERROR_NOT_SUPPORTED ERROR_NOT_SUPPORTED #define HGFS_ERROR_NOT_ENOUGH_MEMORY ERROR_NOT_ENOUGH_MEMORY #define HGFS_ERROR_TOO_MANY_SESSIONS ERROR_MAX_SESSIONS_REACHED #define HGFS_ERROR_INTERNAL ERROR_INTERNAL_ERROR #else #define HGFS_ERROR_SUCCESS 0 #define HGFS_ERROR_IO EIO #define HGFS_ERROR_ACCESS_DENIED EACCES #define HGFS_ERROR_INVALID_PARAMETER EINVAL #define HGFS_ERROR_INVALID_HANDLE EBADF #define HGFS_ERROR_PROTOCOL EPROTO #define HGFS_ERROR_STALE_SESSION ENETRESET #define HGFS_ERROR_BUSY EBUSY #define HGFS_ERROR_PATH_BUSY EBUSY #define HGFS_ERROR_FILE_NOT_FOUND ENOENT #define HGFS_ERROR_FILE_EXIST EEXIST #define HGFS_ERROR_NOT_SUPPORTED EOPNOTSUPP #define HGFS_ERROR_NOT_ENOUGH_MEMORY ENOMEM #define HGFS_ERROR_TOO_MANY_SESSIONS ECONNREFUSED #define HGFS_ERROR_INTERNAL EINTERNAL #endif // _WIN32 /* * Unfortunately, we need a catch-all "generic error" to use with * HgfsInternalStatus, because there are times when cross-platform code needs * to return its own errors along with errors from platform specific code. * * Using -1 should be safe because we expect our platforms to use zero as * success and a positive range of numbers as error values. */ #define HGFS_INTERNAL_STATUS_ERROR (-1) #ifndef _WIN32 /* * This error code is used to notify the client that some of the parameters passed * (e.g. 
file handles) are not supported. Clients are expected to correct * the parameter (e.g. pass file name instead) and retry. * * Note that this error code is artificially made up and in future may conflict * with an "official" error code when added. */ #define EPARAMETERNOTSUPPORTED (MAX_INT32 - 1) #endif /* * FreeBSD (pre-6.0) does not define EPROTO, so we'll define our own error code. */ #if defined __FreeBSD__ && !defined EPROTO #define EPROTO (ELAST + 1) #endif #define HGFS_NAME_BUFFER_SIZE(packetSize, request) (packetSize - (sizeof *request - 1)) #define HGFS_NAME_BUFFER_SIZET(packetSize, sizet) (packetSize - ((sizet) - 1)) #ifndef _WIN32 /* * Routines for converting between Win NT and unix time formats. The * hgfs attributes use the NT time formats, so the linux driver and * server have to convert back and forth. [bac] */ uint64 HgfsConvertToNtTime(time_t unixTime, // IN long nsec); // IN static INLINE uint64 HgfsConvertTimeSpecToNtTime(const struct timespec *unixTime) // IN { return HgfsConvertToNtTime(unixTime->tv_sec, unixTime->tv_nsec); } int HgfsConvertFromNtTime(time_t * unixTime, // OUT uint64 ntTime); // IN int HgfsConvertFromNtTimeNsec(struct timespec *unixTime, // OUT uint64 ntTime); // IN #endif /* !def(_WIN32) */ HgfsStatus HgfsConvertFromInternalStatus(HgfsInternalStatus status); // IN #endif /* _HGFSUTIL_H_ */ vmhgfs-only/message.h 0000444 0000000 0000000 00000003640 13432725330 013625 0 ustar root root /********************************************************* * Copyright (C) 1999-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * message.h -- * * Second layer of the internal communication channel between guest * applications and vmware */ #ifndef __MESSAGE_H__ # define __MESSAGE_H__ #include "vm_basic_types.h" #ifdef __cplusplus extern "C" { #endif /* The channel object */ typedef struct Message_Channel { /* Identifier */ uint16 id; /* Reception buffer */ /* Data */ unsigned char *in; /* Allocated size */ size_t inAlloc; Bool inPreallocated; /* The cookie */ uint32 cookieHigh; uint32 cookieLow; } Message_Channel; Bool Message_OpenAllocated(uint32 proto, Message_Channel *chan, char *receiveBuffer, size_t receiveBufferSize); Message_Channel* Message_Open(uint32 proto); Bool Message_Send(Message_Channel *chan, const unsigned char *buf, size_t bufSize); Bool Message_Receive(Message_Channel *chan, unsigned char **buf, size_t *bufSize); Bool Message_CloseAllocated(Message_Channel *chan); Bool Message_Close(Message_Channel *chan); #ifdef __cplusplus } #endif #endif /* __MESSAGE_H__ */ vmhgfs-only/rpcout.c 0000444 0000000 0000000 00000036527 13432725330 013522 0 ustar root root /********************************************************* * Copyright (C) 2004-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * rpcout.c -- * * Remote Procedure Call between VMware and guest applications * C implementation. * * This module contains implements the out (guest=>host) direction only. * The in and out modules are separate since some applications (e.g. * drivers that want to do RPC-based logging) only want/need/can have the * out direction. */ #if defined(__KERNEL__) || defined(_KERNEL) || defined(KERNEL) # include "kernelStubs.h" #else # include <stdio.h> # include <string.h> # include <stdlib.h> # include <stdarg.h> # include "str.h" # include "debug.h" #endif #include "vmware.h" #include "rpcout.h" #include "message.h" /* * The RpcOut object */ struct RpcOut { Message_Channel channel; Bool started; }; /* *----------------------------------------------------------------------------- * * RpcOutInitialize -- * * Initializes an already allocated RpcOut object. * * Results: * None. * * Side effects: * See above. * *----------------------------------------------------------------------------- */ static void RpcOutInitialize(RpcOut *rpcOut) { memset(rpcOut, 0, sizeof *rpcOut); } /* *----------------------------------------------------------------------------- * * RpcOut_Construct -- * * Constructor for the RpcOut object * * Results: * New RpcOut object. * * Side effects: * Allocates memory. * *----------------------------------------------------------------------------- */ RpcOut * RpcOut_Construct(void) { RpcOut *rpcOut = malloc(sizeof *rpcOut); RpcOutInitialize(rpcOut); return rpcOut; } /* *----------------------------------------------------------------------------- * * RpcOut_Destruct -- * * Destructor for the RpcOut object. * * Results: * None. * * Side effects: * Frees RpcOut object memory. 
* *----------------------------------------------------------------------------- */ void RpcOut_Destruct(RpcOut *out) // IN { ASSERT(out != NULL); ASSERT(!out->started); free(out); } /* *----------------------------------------------------------------------------- * * RpcOut_startWithReceiveBuffer -- * * Open the channel. This variant of RpcOut_start allows the * caller to pre-allocate the receiver buffer, but by doing so it * allows for some simple operations without the need to call * malloc. * * Passing in NULL and size 0 will cause this function to fall * back to using malloc. * * Result: * TRUE on success * FALSE on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool RpcOut_startWithReceiveBuffer(RpcOut *out, char *receiveBuffer, size_t receiveBufferSize) { ASSERT(out != NULL); ASSERT(!out->started); out->started = Message_OpenAllocated(RPCI_PROTOCOL_NUM, &out->channel, receiveBuffer, receiveBufferSize); if (!out->started) { Debug("RpcOut: couldn't open channel with RPCI protocol\n"); } return out->started; } /* *----------------------------------------------------------------------------- * * RpcOut_start -- * * Open the channel * * Result: * TRUE on success * FALSE on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool RpcOut_start(RpcOut *out) // IN { return RpcOut_startWithReceiveBuffer(out, NULL, 0); } /* *----------------------------------------------------------------------------- * * RpcOut_send -- * * Make VMware synchroneously execute a TCLO command * * Unlike the other send varieties, RpcOut_send requires that the * caller pass non-NULL reply and repLen arguments. * * Result * TRUE if RPC was sent successfully. 'reply' contains the result of the rpc. * rpcStatus tells if the RPC command was processed successfully. * * FALSE if RPC could not be sent successfully. 'reply' will contain a * description of the error. 
* * In both cases, the caller should not free the reply. * * Side-effects * None * *----------------------------------------------------------------------------- */ Bool RpcOut_send(RpcOut *out, // IN char const *request, // IN size_t reqLen, // IN Bool *rpcStatus, // OUT char const **reply, // OUT size_t *repLen) // OUT { unsigned char *myReply; size_t myRepLen; Bool success; ASSERT(out != NULL); ASSERT(out->started); if (Message_Send(&out->channel, (const unsigned char *)request, reqLen) == FALSE) { *reply = "RpcOut: Unable to send the RPCI command"; *repLen = strlen(*reply); return FALSE; } if (Message_Receive(&out->channel, &myReply, &myRepLen) == FALSE) { *reply = "RpcOut: Unable to receive the result of the RPCI command"; *repLen = strlen(*reply); return FALSE; } if (myRepLen < 2 || ( (success = strncmp((const char *)myReply, "1 ", 2) == 0) == FALSE && strncmp((const char *)myReply, "0 ", 2))) { *reply = "RpcOut: Invalid format for the result of the RPCI command"; *repLen = strlen(*reply); return FALSE; } *rpcStatus = success; *reply = ((const char *)myReply) + 2; *repLen = myRepLen - 2; return TRUE; } /* *----------------------------------------------------------------------------- * * RpcOut_stop -- * * Close the channel * * Result * TRUE on success * FALSE on failure * * Side-effects * Frees the result of the last command. * *----------------------------------------------------------------------------- */ Bool RpcOut_stop(RpcOut *out) // IN { Bool status = TRUE; ASSERT(out != NULL); if (out->started) { /* Try to close the channel */ if (Message_CloseAllocated(&out->channel) == FALSE) { Debug("RpcOut: couldn't close channel\n"); status = FALSE; } out->started = FALSE; } return status; } /* *----------------------------------------------------------------------------- * * RpcOut_sendOne -- * * Make VMware execute a RPCI command * * VMware closes a channel when it detects that there has been no activity * on it for a while. 
Because we do not know how often this program will * make VMware execute a RPCI, we open/close one channel per RPCI command * * Return value: * TRUE on success. '*reply' contains an allocated result of the rpc * FALSE on error. '*reply' contains an allocated description of the error * or NULL. * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool RpcOut_sendOne(char **reply, // OUT: Result size_t *repLen, // OUT: Length of the result char const *reqFmt, // IN: RPCI command ...) // Unspecified { va_list args; Bool status; char *request; size_t reqLen = 0; status = FALSE; /* Format the request string */ va_start(args, reqFmt); request = Str_Vasprintf(&reqLen, reqFmt, args); va_end(args); /* * If Str_Vasprintf failed, write NULL into the reply if the caller wanted * a reply back. */ if (request == NULL) { if (reply) { *reply = NULL; } return FALSE; } /* * If the command doesn't contain a space, add one to the end to maintain * compatibility with old VMXs. * * For a long time, the GuestRpc logic in the VMX was wired to expect a * trailing space in every command, even commands without arguments. That is * no longer true, but we must continue to add a trailing space because we * don't know whether we're talking to an old or new VMX. */ if (strchr(request, ' ') == NULL) { char *tmp; tmp = Str_Asprintf(NULL, "%s ", request); free(request); request = tmp; /* * If Str_Asprintf failed, write NULL into reply if the caller wanted * a reply back. */ if (request == NULL) { if (reply != NULL) { *reply = NULL; } return FALSE; } } status = RpcOut_SendOneRaw(request, reqLen, reply, repLen); free(request); return status; } /* *----------------------------------------------------------------------------- * * RpcOutSendOneRawWork -- * * Helper function to make VMware execute a RPCI command. See * RpcOut_SendOneRaw and RpcOut_SendOneRawPreallocated. 
* *----------------------------------------------------------------------------- */ static Bool RpcOutSendOneRawWork(void *request, // IN: RPCI command size_t reqLen, // IN: Size of request buffer char *callerReply, // IN: caller supplied reply buffer size_t callerReplyLen, // IN: size of caller supplied buf char **reply, // OUT: Result size_t *repLen) // OUT: Length of the result { Bool status; Bool rpcStatus; /* Stack allocate so this can be used in kernel logging. See 1389199. */ RpcOut out; char const *myReply; size_t myRepLen; Debug("Rpci: Sending request='%s'\n", (char *)request); RpcOutInitialize(&out); if (!RpcOut_startWithReceiveBuffer(&out, callerReply, callerReplyLen)) { myReply = "RpcOut: Unable to open the communication channel"; myRepLen = strlen(myReply); if (callerReply != NULL) { unsigned s = MIN(callerReplyLen - 1, myRepLen); ASSERT(reply == NULL); memcpy(callerReply, myReply, s); callerReply[s] = '\0'; } return FALSE; } status = RpcOut_send(&out, request, reqLen, &rpcStatus, &myReply, &myRepLen); /* On failure, we already have the description of the error */ Debug("Rpci: Sent request='%s', reply='%s', len=%"FMTSZ"u, " "status=%d, rpcStatus=%d\n", (char *)request, myReply, myRepLen, status, rpcStatus); if (reply != NULL) { /* * If we got a non-NULL reply, make a copy of it, because the reply * we got back is inside the channel buffer, which will get destroyed * at the end of this function. */ if (myReply != NULL) { /* * We previously used strdup to duplicate myReply, but that * breaks if you are sending binary (not string) data over the * backdoor. Don't assume the data is a string. * * myRepLen is strlen(myReply), so we need an extra byte to * cover the NUL terminator. */ *reply = malloc(myRepLen + 1); if (*reply != NULL) { memcpy(*reply, myReply, myRepLen); /* * The message layer already writes a trailing NUL but we might * change that someday, so do it again here. 
*/ (*reply)[myRepLen] = 0; } } else { /* * Our reply was NULL, so just pass the NULL back up to the caller. */ *reply = NULL; } /* * Only set the length if the caller wanted it and if we got a good * reply. */ if (repLen != NULL && *reply != NULL) { *repLen = myRepLen; } } if (RpcOut_stop(&out) == FALSE) { /* * We couldn't stop the channel. Free anything we allocated, give our * client a reply of NULL, and return FALSE. */ if (reply != NULL) { free(*reply); *reply = NULL; } Debug("Rpci: unable to close the communication channel\n"); status = FALSE; } return status && rpcStatus; } /* *----------------------------------------------------------------------------- * * RpcOut_SendOneRaw -- * * Make VMware execute a RPCI command * * VMware closes a channel when it detects that there has been no activity * on it for a while. Because we do not know how often this program will * make VMware execute a RPCI, we open/close one channel per RPCI command. * * This function sends a message over the backdoor without using * any of the Str_ functions on the request buffer; Str_Asprintf() in * particular uses FormatMessage on Win32, which corrupts some UTF-8 * strings. Using this function directly instead of using RpcOut_SendOne() * avoids these problems. * * If this is not an issue, you can use RpcOut_sendOne(), which has * varargs. * * Note: It is the caller's responsibility to ensure that the RPCI command * followed by a space appear at the start of the request buffer. See * the command in RpcOut_sendOne for details. * * Return value: * TRUE on success. '*reply' contains an allocated result of the rpc * FALSE on error. '*reply' contains an allocated description of the * error or NULL. 
* * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool RpcOut_SendOneRaw(void *request, // IN: RPCI command size_t reqLen, // IN: Size of request buffer char **reply, // OUT: Result size_t *repLen) // OUT: Length of the result { return RpcOutSendOneRawWork(request, reqLen, NULL, 0, reply, repLen); } /* *----------------------------------------------------------------------------- * * RpcOut_SendOneRawPreallocated -- * * Make VMware execute a RPCI command. * * A variant of RpcOut_SendOneRaw in which the caller supplies the * receive buffer instead of dynamically allocating it. This * allows the caller to call this function in situations where * malloc is not allowed. But if the response from the host is too * large the rpc will fail instead of attempting to grow its own * receive buffer. * * * Return value: * TRUE on success. 'reply' contains an allocated result of the rpc * FALSE on error. 'reply' contains an allocated description of the * error or NULL. * * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool RpcOut_SendOneRawPreallocated(void *request, // IN: RPCI command size_t reqLen, // IN: Size of request buffer char *reply, // OUT: Result size_t repLen) // IN: Length of the result { return RpcOutSendOneRawWork(request, reqLen, reply, repLen, NULL, NULL); } vmhgfs-only/message.c 0000444 0000000 0000000 00000042265 13432725330 013626 0 ustar root root /********************************************************* * Copyright (C) 1999-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * message.c -- * * Second layer of the internal communication channel between guest * applications and vmware * * Build a generic messaging system between guest applications and vmware. * * The protocol is not completely symmetrical, because: * . basic requests can only be sent by guest applications (when vmware * wants to post a message to a guest application, the message will be * really fetched only when the guest application will poll for new * available messages) * . several guest applications can talk to vmware, while the contrary is * not true * * Operations that are not atomic (in terms of number of backdoor calls) * can be aborted by vmware if a checkpoint/restore occurs in the middle of * such an operation. This layer takes care of retrying those operations. */ #include "backdoor_def.h" #include "guest_msg_def.h" #ifdef __cplusplus extern "C" { #endif #if defined(__KERNEL__) || defined(_KERNEL) || defined(KERNEL) # include "kernelStubs.h" #else # include <stdio.h> # include <stdlib.h> # include "debug.h" #endif #include "backdoor.h" #include "message.h" #if defined(MESSAGE_DEBUG) # define MESSAGE_LOG(...) Warning(__VA_ARGS__) #else # define MESSAGE_LOG(...) #endif /* *----------------------------------------------------------------------------- * * Message_OpenAllocated -- * * Open a communication channel using an allocated, but unitialized * Message_Channel structure. A receive buffer may be optionally * specified with a given size. 
If a message larger than this * buffer is received the communication will be aborted. If no * receiver buffer is specified, one will be dynamically allocated * to size. When finished with the channel, Message_CloseAllocated * should be called. * * Result: * TRUE on success, FALSE on failure. * * Side-effects: * See above. * *----------------------------------------------------------------------------- */ Bool Message_OpenAllocated(uint32 proto, Message_Channel *chan, char *receiveBuffer, size_t receiveBufferSize) { uint32 flags; Backdoor_proto bp; flags = GUESTMSG_FLAG_COOKIE; retry: /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_OPEN; /* IN: Magic number of the protocol and flags */ bp.in.size = proto | flags; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if (flags) { /* Cookies not supported. Fall back to no cookie. --hpreg */ flags = 0; goto retry; } MESSAGE_LOG("Message: Unable to open a communication channel\n"); return FALSE; } /* OUT: Id and cookie */ chan->id = bp.in.dx.halfs.high; chan->cookieHigh = bp.out.si.word; chan->cookieLow = bp.out.di.word; /* Initialize the channel */ chan->in = (unsigned char *)receiveBuffer; chan->inAlloc = receiveBufferSize; ASSERT((receiveBuffer == NULL) == (receiveBufferSize == 0)); chan->inPreallocated = receiveBuffer != NULL; return TRUE; } /* *----------------------------------------------------------------------------- * * Message_Open -- * * Open a communication channel * * Result: * An allocated Message_Channel on success * NULL on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Message_Channel * Message_Open(uint32 proto) // IN { Message_Channel *chan = malloc(sizeof *chan); if (chan != NULL && !Message_OpenAllocated(proto, chan, NULL, 0)) { free(chan); chan = NULL; } return chan; } /* *----------------------------------------------------------------------------- 
* * Message_Send -- * * Send a message over a communication channel * * Result: * TRUE on success * FALSE on failure (the message is discarded by vmware) * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool Message_Send(Message_Channel *chan, // IN/OUT const unsigned char *buf, // IN size_t bufSize) // IN { const unsigned char *myBuf; size_t myBufSize; Backdoor_proto bp; retry: myBuf = buf; myBufSize = bufSize; /* * Send the size. */ /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_SENDSIZE; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; /* IN: Size */ bp.in.size = myBufSize; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { MESSAGE_LOG("Message: Unable to send a message over the communication " "channel %u\n", chan->id); return FALSE; } if (bp.in.cx.halfs.high & MESSAGE_STATUS_HB) { /* * High-bandwidth backdoor port supported. Send the message in one * backdoor operation. --hpreg */ if (myBufSize) { Backdoor_proto_hb bphb; bphb.in.bx.halfs.low = BDOORHB_CMD_MESSAGE; bphb.in.bx.halfs.high = MESSAGE_STATUS_SUCCESS; bphb.in.dx.halfs.high = chan->id; bphb.in.bp.word = chan->cookieHigh; bphb.in.dstAddr = chan->cookieLow; bphb.in.size = myBufSize; bphb.in.srcAddr = (uintptr_t) myBuf; Backdoor_HbOut(&bphb); if ((bphb.in.bx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if ((bphb.in.bx.halfs.high & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry the operation. --hpreg */ goto retry; } MESSAGE_LOG("Message: Unable to send a message over the " "communication channel %u\n", chan->id); return FALSE; } } } else { /* * High-bandwidth backdoor port not supported. Send the message, 4 bytes * at a time. 
--hpreg */ for (;;) { if (myBufSize == 0) { /* We are done */ break; } /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_SENDPAYLOAD; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; /* IN: Piece of message */ /* * Beware in case we are not allowed to read extra bytes beyond the * end of the buffer. */ switch (myBufSize) { case 1: bp.in.size = myBuf[0]; myBufSize -= 1; break; case 2: bp.in.size = myBuf[0] | myBuf[1] << 8; myBufSize -= 2; break; case 3: bp.in.size = myBuf[0] | myBuf[1] << 8 | myBuf[2] << 16; myBufSize -= 3; break; default: bp.in.size = *(const uint32 *)myBuf; myBufSize -= 4; break; } bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if ((bp.in.cx.halfs.high & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry the operation. --hpreg */ goto retry; } MESSAGE_LOG("Message: Unable to send a message over the " "communication channel %u\n", chan->id); return FALSE; } myBuf += 4; } } return TRUE; } /* *----------------------------------------------------------------------------- * * Message_Receive -- * * If vmware has posted a message for this channel, retrieve it * * Result: * TRUE on success (bufSize is 0 if there is no message) * FALSE on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool Message_Receive(Message_Channel *chan, // IN/OUT unsigned char **buf, // OUT size_t *bufSize) // OUT { Backdoor_proto bp; size_t myBufSize; unsigned char *myBuf; retry: /* * Is there a message waiting for our retrieval? 
*/ /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_RECVSIZE; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { MESSAGE_LOG("Message: Unable to poll for messages over the " "communication channel %u\n", chan->id); return FALSE; } if ((bp.in.cx.halfs.high & MESSAGE_STATUS_DORECV) == 0) { /* No message to retrieve */ *bufSize = 0; return TRUE; } /* * Receive the size. */ /* OUT: Type */ if (bp.in.dx.halfs.high != MESSAGE_TYPE_SENDSIZE) { MESSAGE_LOG("Message: Protocol error. Expected a " "MESSAGE_TYPE_SENDSIZE request from vmware\n"); return FALSE; } /* OUT: Size */ myBufSize = bp.out.bx.word; /* * Allocate an extra byte for a trailing NUL character. The code that will * deal with this message may not know about binary strings, and may expect * a C string instead. --hpreg */ if (myBufSize + 1 > chan->inAlloc) { if (chan->inPreallocated) { MESSAGE_LOG("Message: Buffer too small to receive a message over " "the communication channel %u\n", chan->id); goto error_quit; } else { myBuf = (unsigned char *)realloc(chan->in, myBufSize + 1); if (myBuf == NULL) { MESSAGE_LOG("Message: Not enough memory to receive a message over " "the communication channel %u\n", chan->id); goto error_quit; } chan->in = myBuf; chan->inAlloc = myBufSize + 1; } } *bufSize = myBufSize; myBuf = *buf = chan->in; if (bp.in.cx.halfs.high & MESSAGE_STATUS_HB) { /* * High-bandwidth backdoor port supported. Receive the message in one * backdoor operation. 
--hpreg */ if (myBufSize) { Backdoor_proto_hb bphb; bphb.in.bx.halfs.low = BDOORHB_CMD_MESSAGE; bphb.in.bx.halfs.high = MESSAGE_STATUS_SUCCESS; bphb.in.dx.halfs.high = chan->id; bphb.in.srcAddr = chan->cookieHigh; bphb.in.bp.word = chan->cookieLow; bphb.in.size = myBufSize; bphb.in.dstAddr = (uintptr_t) myBuf; Backdoor_HbIn(&bphb); if ((bphb.in.bx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if ((bphb.in.bx.halfs.high & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry the operation. --hpreg */ goto retry; } MESSAGE_LOG("Message: Unable to receive a message over the " "communication channel %u\n", chan->id); goto error_quit; } } } else { /* * High-bandwidth backdoor port not supported. Receive the message, 4 * bytes at a time. --hpreg */ for (;;) { if (myBufSize == 0) { /* We are done */ break; } /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_RECVPAYLOAD; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; /* IN: Status for the previous request (that succeeded) */ bp.in.size = MESSAGE_STATUS_SUCCESS; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if ((bp.in.cx.halfs.high & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry the operation. --hpreg */ goto retry; } MESSAGE_LOG("Message: Unable to receive a message over the " "communication channel %u\n", chan->id); goto error_quit; } /* OUT: Type */ if (bp.in.dx.halfs.high != MESSAGE_TYPE_SENDPAYLOAD) { MESSAGE_LOG("Message: Protocol error. Expected a " "MESSAGE_TYPE_SENDPAYLOAD from vmware\n"); goto error_quit; } /* OUT: Piece of message */ /* * Beware in case we are not allowed to write extra bytes beyond the * end of the buffer. 
--hpreg */ switch (myBufSize) { case 1: myBuf[0] = bp.out.bx.word & 0xff; myBufSize -= 1; break; case 2: myBuf[0] = bp.out.bx.word & 0xff; myBuf[1] = (bp.out.bx.word >> 8) & 0xff; myBufSize -= 2; break; case 3: myBuf[0] = bp.out.bx.word & 0xff; myBuf[1] = (bp.out.bx.word >> 8) & 0xff; myBuf[2] = (bp.out.bx.word >> 16) & 0xff; myBufSize -= 3; break; default: *(uint32 *)myBuf = bp.out.bx.word; myBufSize -= 4; break; } myBuf += 4; } } /* Write a trailing NUL just after the message. --hpreg */ chan->in[*bufSize] = '\0'; /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_RECVSTATUS; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; /* IN: Status for the previous request (that succeeded) */ bp.in.size = MESSAGE_STATUS_SUCCESS; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { if ((bp.in.cx.halfs.high & MESSAGE_STATUS_CPT) != 0) { /* A checkpoint occurred. Retry the operation. 
--hpreg */ goto retry; } MESSAGE_LOG("Message: Unable to receive a message over the " "communication channel %u\n", chan->id); goto error_quit; } return TRUE; error_quit: /* IN: Type */ if (myBufSize == 0) { bp.in.cx.halfs.high = MESSAGE_TYPE_RECVSTATUS; } else { bp.in.cx.halfs.high = MESSAGE_TYPE_RECVPAYLOAD; } /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; /* IN: Status for the previous request (that failed) */ bp.in.size = 0; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { MESSAGE_LOG("Message: Unable to signal an error of reception over the " "communication channel %u\n", chan->id); return FALSE; } return FALSE; } /* *----------------------------------------------------------------------------- * * Message_CloseAllocated -- * * Close a communication channel that had been allocated by the * caller. (For use with Message_OpenAllocated.) * * Result: * TRUE on success, the channel is destroyed * FALSE on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool Message_CloseAllocated(Message_Channel *chan) // IN/OUT { Backdoor_proto bp; Bool ret = TRUE; /* IN: Type */ bp.in.cx.halfs.high = MESSAGE_TYPE_CLOSE; /* IN: Id and cookie */ bp.in.dx.halfs.high = chan->id; bp.in.si.word = chan->cookieHigh; bp.in.di.word = chan->cookieLow; bp.in.cx.halfs.low = BDOOR_CMD_MESSAGE; Backdoor(&bp); /* OUT: Status */ if ((bp.in.cx.halfs.high & MESSAGE_STATUS_SUCCESS) == 0) { MESSAGE_LOG("Message: Unable to close the communication channel %u\n", chan->id); ret = FALSE; } if (!chan->inPreallocated) { free(chan->in); } chan->in = NULL; return ret; } /* *----------------------------------------------------------------------------- * * Message_Close -- * * Close a communication channel. 
* * Result: * TRUE on success, the channel is destroyed * FALSE on failure * * Side-effects: * None * *----------------------------------------------------------------------------- */ Bool Message_Close(Message_Channel *chan) // IN/OUT { Bool ret = Message_CloseAllocated(chan); free(chan); return ret; } #ifdef __cplusplus } #endif vmhgfs-only/stubs.c 0000444 0000000 0000000 00000003706 13432725306 013342 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * stubs.c * * Contains stubs and helper functions. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include "kernelStubs.h" #include "module.h" #include "vm_assert.h" /* *---------------------------------------------------------------------- * * Debug -- * * If debugging is enabled, output debug information. * * Result * None * * Side-effects * None * *---------------------------------------------------------------------- */ void Debug(char const *fmt, // IN: Format string ...) 
// IN: Arguments { va_list args; int numBytes; static char out[128]; va_start(args, fmt); numBytes = Str_Vsnprintf(out, sizeof out, fmt, args); va_end(args); if (numBytes > 0) { LOG(6, (KERN_DEBUG "VMware hgfs: %s", out)); } } /* *---------------------------------------------------------------------- * * Log -- * * Needs to be defined. * * Result * None * * Side-effects * None * *---------------------------------------------------------------------- */ void Log(const char *string, ...) { // do nothing. } vmhgfs-only/dentry.c 0000444 0000000 0000000 00000006451 13432725306 013507 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * dentry.c -- * * Dentry operations for the filesystem portion of the vmhgfs driver. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include "compat_fs.h" #include "compat_kernel.h" #include "compat_namei.h" #include "compat_version.h" #include "inode.h" #include "module.h" #include "vm_assert.h" /* HGFS dentry operations. */ static int HgfsDentryRevalidate(struct dentry *dentry, #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) unsigned int flags #else struct nameidata *nd #endif ); /* HGFS dentry operations structure. 
*/ struct dentry_operations HgfsDentryOperations = { .d_revalidate = HgfsDentryRevalidate, }; /* * HGFS dentry operations. */ /* *---------------------------------------------------------------------- * * HgfsDentryRevalidate -- * * Called by namei.c every time a dentry is looked up in the dcache * to determine if it is still valid. * * If the entry is found to be invalid, namei calls dput on it and * returns NULL, which causes a new lookup to be done in the actual * filesystem, which in our case means that HgfsLookup is called. * * Results: * Positive value if the entry IS valid. * Zero if the entry is NOT valid. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsDentryRevalidate(struct dentry *dentry, // IN: Dentry to revalidate #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) unsigned int flags // IN: Lookup flags & intent #else struct nameidata *nd // IN: Lookup flags & intent #endif ) { int error; LOG(6, (KERN_DEBUG "VMware hgfs: HgfsDentryRevalidate: calling " "HgfsRevalidate\n")); ASSERT(dentry); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) if (flags & LOOKUP_RCU) { return -ECHILD; } #elif defined(LOOKUP_RCU) /* Introduced in 2.6.38 */ if (nd && (nd->flags & LOOKUP_RCU)) { return -ECHILD; } #endif /* Just call HgfsRevaliate, which does the right thing. */ error = HgfsRevalidate(dentry); if (error) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDentryRevalidate: invalid\n")); if (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode)) { shrink_dcache_parent(dentry); } d_drop(dentry); return 0; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsDentryRevalidate: valid\n")); return 1; } vmhgfs-only/filesystem.c 0000444 0000000 0000000 00000065703 13432725306 014373 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * filesystem.c -- * * High-level filesystem operations for the filesystem portion of * the vmhgfs driver. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <asm/atomic.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/module.h> #include <linux/pagemap.h> #include "compat_cred.h" #include "compat_dcache.h" #include "compat_fs.h" #include "compat_kernel.h" #include "compat_sched.h" #include "compat_semaphore.h" #include "compat_slab.h" #include "compat_spinlock.h" #include "compat_string.h" #include "compat_uaccess.h" #include "compat_version.h" #include "filesystem.h" #include "transport.h" #include "hgfsDevLinux.h" #include "hgfsProto.h" #include "hgfsUtil.h" #include "module.h" #include "request.h" #include "fsutil.h" #include "vm_assert.h" #include "vm_basic_types.h" #include "rpcout.h" #include "hgfs.h" /* Synchronization primitives. */ DEFINE_SPINLOCK(hgfsBigLock); /* Other variables. */ compat_kmem_cache *hgfsInodeCache; /* Global protocol version switch. 
*/ HgfsOp hgfsVersionOpen; HgfsOp hgfsVersionRead; HgfsOp hgfsVersionWrite; HgfsOp hgfsVersionClose; HgfsOp hgfsVersionSearchOpen; HgfsOp hgfsVersionSearchRead; HgfsOp hgfsVersionSearchClose; HgfsOp hgfsVersionGetattr; HgfsOp hgfsVersionSetattr; HgfsOp hgfsVersionCreateDir; HgfsOp hgfsVersionDeleteFile; HgfsOp hgfsVersionDeleteDir; HgfsOp hgfsVersionRename; HgfsOp hgfsVersionQueryVolumeInfo; HgfsOp hgfsVersionCreateSymlink; /* Private functions. */ static inline unsigned long HgfsComputeBlockBits(unsigned long blockSize); static compat_kmem_cache_ctor HgfsInodeCacheCtor; static HgfsSuperInfo *HgfsInitSuperInfo(void *rawData, uint32 mountInfoVersion); static int HgfsReadSuper(struct super_block *sb, void *rawData, int flags); static void HgfsResetOps(void); /* HGFS filesystem high-level operations. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) static struct dentry *HgfsMount(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData); #elif defined(VMW_GETSB_2618) static int HgfsGetSb(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData, struct vfsmount *mnt); #else static struct super_block *HgfsGetSb(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData); #endif /* HGFS filesystem type structure. */ static struct file_system_type hgfsType = { .owner = THIS_MODULE, .name = HGFS_NAME, .fs_flags = FS_BINARY_MOUNTDATA, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) .mount = HgfsMount, #else .get_sb = HgfsGetSb, #endif .kill_sb = kill_anon_super, }; /* * Private functions implementations. */ /* *----------------------------------------------------------------------------- * * HgfsComputeBlockBits -- * * Given a block size, returns the number of bits in the block, rounded * down. This approach of computing the number of bits per block and * saving it for later use is the same used in NFS. * * Results: * The number of bits in the block. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static inline unsigned long HgfsComputeBlockBits(unsigned long blockSize) { uint8 numBits; for (numBits = 31; numBits && !(blockSize & (1 << numBits)); numBits--); return numBits; } /* *----------------------------------------------------------------------------- * * HgfsInodeCacheCtor -- * * Constructor for HGFS inode structures that runs once at slab * allocation. It is called once for each piece of memory that * is used to satisfy HGFS inode allocations; it should only be * used to initialize items that will naturally return to their * initialized state before deallocation (such as locks, list_heads). * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static void HgfsInodeCacheCtor(COMPAT_KMEM_CACHE_CTOR_ARGS(slabElem)) // IN: slab item to initialize { HgfsInodeInfo *iinfo = (HgfsInodeInfo *)slabElem; /* * VFS usually calls this as part of allocating inodes for us, but since * we're doing the allocation now, we need to call it. It'll set up * much of the VFS inode members. */ inode_init_once(&iinfo->inode); } /* *----------------------------------------------------------------------------- * * HgfsValidateMountInfo -- * * Validate the the user mode mounter information. * * Results: * Zero on success or -EINVAL if we pass in an unknown version. * * Side effects: * None * *----------------------------------------------------------------------------- */ static int HgfsValidateMountInfo(void *rawData, // IN: Fs-specific mount data uint32 *mountInfoVersion) // OUT: Mount flags { HgfsMountInfoV1 *infoV1; HgfsMountInfo *info; uint32 *magicNumber; int retVal = -EINVAL; ASSERT(mountInfoVersion); /* Sanity check the incoming user data. */ if (rawData == NULL) { printk(KERN_WARNING LGPFX "%s: error: no user supplied mount data\n", __func__); goto exit; } /* Magic number is always first 4 bytes of the header. 
*/ magicNumber = rawData; if (*magicNumber != HGFS_SUPER_MAGIC) { printk(KERN_WARNING LGPFX "%s: error: user supplied mount data is not valid!\n", __func__); goto exit; } /* * Looks like HGFS data, now validate the version so that we can * proceed and extract the required settings from the user. */ info = rawData; infoV1 = rawData; if ((info->version == HGFS_MOUNTINFO_VERSION_1 || info->version == HGFS_MOUNTINFO_VERSION_2) && info->infoSize == sizeof *info) { /* * The current version is validated with the size and magic number. * Note the version can be either 1 or 2 as it was not bumped initially. * Furthermore, return the version as HGFS_MOUNTINFO_VERSION_2 only since * the objects are the same and it simplifies field extractions. */ LOG(4, (KERN_DEBUG LGPFX "%s: mount data version %d passed\n", __func__, info->version)); *mountInfoVersion = HGFS_MOUNTINFO_VERSION_2; retVal = 0; } else if (infoV1->version == HGFS_MOUNTINFO_VERSION_1) { /* * The version 1 is validated with the version and magic number. * Note the version can be only be 1 and if so does not collide with version 2 of * the header (which would be the info size field). */ LOG(4, (KERN_DEBUG LGPFX "%s: mount data version %d passed\n", __func__, infoV1->version)); *mountInfoVersion = infoV1->version; retVal = 0; } else { /* * The version and info size fields could not be validated * for the known structure. It is probably a newer version. */ printk(KERN_WARNING LGPFX "%s: error: user supplied mount data version %d\n", __func__, infoV1->version); } exit: return retVal; } /* *----------------------------------------------------------------------------- * * HgfsGetMountInfoV1 -- * * Gets the fields of interest from the user mode mounter version 1. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static void HgfsGetMountInfoV1(HgfsMountInfoV1 *mountInfo, // IN: version 1 mount data uint32 *mntFlags, // OUT: Mount flags uint32 *ttl, // OUT: seconds until revalidate uid_t *uid, // OUT: owner gid_t *gid, // OUT: group mode_t *fmask, // OUT: file mask mode_t *dmask, // OUT: directory mask const char **shareHost, // OUT: share host name const char **shareDir) // OUT: share directory { ASSERT(mountInfo); *mntFlags = 0; /* * If the mounter specified a uid or gid, we will prefer them over any uid * or gid given to us by the server. */ if (mountInfo->uidSet) { *mntFlags |= HGFS_MNT_SET_UID; *uid = mountInfo->uid; } if (mountInfo->gidSet) { *mntFlags |= HGFS_MNT_SET_GID; *gid = mountInfo->gid; } *fmask = mountInfo->fmask; *dmask = mountInfo->dmask; *ttl = mountInfo->ttl; *shareHost = mountInfo->shareNameHost; *shareDir = mountInfo->shareNameDir; } /* *----------------------------------------------------------------------------- * * HgfsGetMountInfoV2 -- * * Gets the fields of interest from the user mode mounter version 2. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static void HgfsGetMountInfoV2(HgfsMountInfo *mountInfo, // IN: version 2 mount data uint32 *mntFlags, // OUT: Mount flags uint32 *ttl, // OUT: seconds until revalidate uid_t *uid, // OUT: owner gid_t *gid, // OUT: group mode_t *fmask, // OUT: file mask mode_t *dmask, // OUT: directory mask const char **shareHost, // OUT: share host name const char **shareDir) // OUT: share directory { ASSERT(mountInfo); *mntFlags = 0; if ((mountInfo->flags & HGFS_MNTINFO_SERVER_INO) != 0) { *mntFlags |= HGFS_MNT_SERVER_INUM; } /* * If the mounter specified a uid or gid, we will prefer them over any uid * or gid given to us by the server. 
*/ if (mountInfo->uidSet) { *mntFlags |= HGFS_MNT_SET_UID; *uid = mountInfo->uid; } if (mountInfo->gidSet) { *mntFlags |= HGFS_MNT_SET_GID; *gid = mountInfo->gid; } *fmask = mountInfo->fmask; *dmask = mountInfo->dmask; *ttl = mountInfo->ttl; *shareHost = mountInfo->shareNameHost; *shareDir = mountInfo->shareNameDir; } /* *----------------------------------------------------------------------------- * * HgfsGetMountInfo -- * * Gets the fields of interest from the user mode mounter. * * Results: * Zero on success or -EINVAL if we pass in an unknown version. * * Side effects: * None * *----------------------------------------------------------------------------- */ static int HgfsGetMountInfo(void *rawData, // IN: Fs-specific mount data uint32 mountInfoVersion, // IN: mount information version uint32 *mntFlags, // OUT: Mount flags uint32 *ttl, // OUT: seconds until revalidate uid_t *uid, // OUT: owner gid_t *gid, // OUT: group mode_t *fmask, // OUT: file mask mode_t *dmask, // OUT: directory mask const char **shareHost, // OUT: share host name const char **shareDir) // OUT: share path { int result = 0; switch (mountInfoVersion) { case HGFS_MOUNTINFO_VERSION_1: HgfsGetMountInfoV1(rawData, mntFlags, ttl, uid, gid, fmask, dmask, shareHost, shareDir); break; case HGFS_MOUNTINFO_VERSION_2: HgfsGetMountInfoV2(rawData, mntFlags, ttl, uid, gid, fmask, dmask, shareHost, shareDir); break; default: ASSERT(FALSE); result = -EINVAL; } return result; } /* *---------------------------------------------------------------------- * * HgfsInitSuperInfo -- * * Allocate and initialize a new HgfsSuperInfo object * * Results: * Returns a new HgfsSuperInfo object with all its fields initialized, * or an error code cast as a pointer. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static HgfsSuperInfo * HgfsInitSuperInfo(void *rawData, // IN: Passed down from the user uint32 mountInfoVersion) // IN: version { HgfsSuperInfo *si = NULL; int result = 0; int len; char *tmpName = NULL; Bool hostValid; uint32 mntFlags = 0; uint32 ttl = 0; uid_t uid = 0; gid_t gid = 0; mode_t fmask = 0; mode_t dmask = 0; const char *shareHost; const char *shareDir; si = kmalloc(sizeof *si, GFP_KERNEL); if (!si) { result = -ENOMEM; goto out_error_si; } memset(si, 0, sizeof *si); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) result = bdi_setup_and_register(&si->bdi, HGFS_NAME); if (result) { LOG(6, (KERN_DEBUG "VMware hgfs: %s: initialize backing device info" "failed. (%d)\n", __func__, result)); goto out_error_si; } #endif result = HgfsGetMountInfo(rawData, mountInfoVersion, &mntFlags, &ttl, &uid, &gid, &fmask, &dmask, &shareHost, &shareDir); if (result < 0) { LOG(6, (KERN_DEBUG LGPFX "%s: error: get mount info %d\n", __func__, result)); goto out_error_last; } /* * Initialize with the default flags. */ si->mntFlags = mntFlags; si->uid = current_uid(); if ((si->mntFlags & HGFS_MNT_SET_UID) != 0) { kuid_t mntUid = make_kuid(current_user_ns(), uid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) if (uid_valid(mntUid)) #endif si->uid = mntUid; } si->gid = current_gid(); if ((si->mntFlags & HGFS_MNT_SET_GID) != 0) { kgid_t mntGid = make_kgid(current_user_ns(), gid); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) if (gid_valid(mntGid)) #endif si->gid = mntGid; } si->fmask = fmask; si->dmask = dmask; si->ttl = ttl * HZ; // in ticks /* * We don't actually care about this field (though we may care in the * future). For now, just make sure it is set to ".host" as a sanity check. * * We can't call getname() directly because on certain kernels we can't call * putname() directly. 
For more details, see the change description of * change 464782 or the second comment in bug 159623, which fixed the same * problem for vmblock. */ tmpName = compat___getname(); if (!tmpName) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: could not obtain " "memory for filename\n")); result = -ENOMEM; goto out_error_last; } len = strncpy_from_user(tmpName, shareHost, PATH_MAX); if (len < 0 || len >= PATH_MAX) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: strncpy_from_user " "on host string failed\n")); result = len < 0 ? len : -ENAMETOOLONG; goto out_error_last; } hostValid = strcmp(tmpName, ".host") == 0; if (!hostValid) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: host string is " "invalid\n")); result = -EINVAL; goto out_error_last; } /* * Perform a simple sanity check on the directory portion: it must begin * with forward slash. */ len = strncpy_from_user(tmpName, shareDir, PATH_MAX); if (len < 0 || len >= PATH_MAX) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: strncpy_from_user " "on dir string failed\n")); result = len < 0 ? len : -ENAMETOOLONG; goto out_error_last; } if (*tmpName != '/') { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: dir string is " "invalid\n")); result = -EINVAL; goto out_error_last; } /* * The SELinux audit subsystem will delay the putname() of a string until * the end of a system call so that it may be audited at any point. At that * time, it also unconditionally calls putname() on every string allocated * by getname(). * * This means we can't safely retain strings allocated by getname() beyond * the syscall boundary. So after getting the string, use kstrdup() to * duplicate it, and store that (audit-safe) result in the SuperInfo struct. 
*/ si->shareName = compat_kstrdup(tmpName, GFP_KERNEL); if (si->shareName == NULL) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsInitSuperInfo: kstrdup on " "dir string failed\n")); result = -ENOMEM; goto out_error_last; } si->shareNameLen = strlen(si->shareName); out_error_last: if (tmpName) { compat___putname(tmpName); } #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) if (result) { bdi_destroy(&si->bdi); } #endif out_error_si: if (result) { kfree(si); si = ERR_PTR(result); } return si; } /* *----------------------------------------------------------------------------- * * HgfsReadSuper -- * * The main entry point of the filesystem side of the driver. Called when * a userland process does a mount(2) of an hgfs filesystem. This makes the * whole driver transition from its initial state to state 1. Fill the * content of the uninitialized superblock provided by the kernel. * * 'rawData' is a pointer (that can be NULL) to a kernel buffer (whose * size is <= PAGE_SIZE) that corresponds to the filesystem-specific 'data' * argument passed to mount(2). * * Results: * zero and initialized superblock on success * negative value on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ static int HgfsReadSuper(struct super_block *sb, // OUT: Superblock object void *rawData, // IN: Fs-specific mount data int flags) // IN: Mount flags { int result = 0; HgfsSuperInfo *si; struct dentry *rootDentry = NULL; uint32 mountInfoVersion; ASSERT(sb); LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadSuper: entered\n")); /* Sanity check the incoming user data. */ result = HgfsValidateMountInfo(rawData, &mountInfoVersion); if (result < 0) { return result; } /* Setup both our superblock and the VFS superblock. 
*/ si = HgfsInitSuperInfo(rawData, mountInfoVersion); if (IS_ERR(si)) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsReadSuper: superinfo " "init failed\n")); return PTR_ERR(si); } HGFS_SET_SB_TO_COMMON(sb, si); sb->s_magic = HGFS_SUPER_MAGIC; sb->s_op = &HgfsSuperOperations; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) sb->s_d_op = &HgfsDentryOperations; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) sb->s_bdi = &si->bdi; #endif /* * If s_maxbytes isn't initialized, the generic write path may fail. In * most kernels, s_maxbytes is initialized by the kernel's superblock * allocation routines, but in some, it's up to the filesystem to initialize * it. Note that we'll initialize it anyway, because the default value is * MAX_NON_LFS, which caps our filesize at 2^32 bytes. */ sb->s_maxbytes = MAX_LFS_FILESIZE; /* * These two operations will make sure that our block size and the bits * per block match up, no matter what HGFS_BLOCKSIZE may be. Granted, * HGFS_BLOCKSIZE will always be a power of two, but you never know! */ sb->s_blocksize_bits = HgfsComputeBlockBits(HGFS_BLOCKSIZE); sb->s_blocksize = 1 << sb->s_blocksize_bits; /* * Create the root dentry and its corresponding inode. */ result = HgfsInstantiateRoot(sb, &rootDentry); if (result) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsReadSuper: Could not instantiate " "root dentry\n")); goto exit; } sb->s_root = rootDentry; LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadSuper: finished %s\n", si->shareName)); exit: if (result) { dput(rootDentry); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) bdi_destroy(&si->bdi); sb->s_bdi = NULL; #endif kfree(si->shareName); kfree(si); } return result; } /* * HGFS filesystem high-level operations. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) /* *----------------------------------------------------------------------------- * * HgfsMount -- * * Invokes generic kernel code to mount a deviceless filesystem. 
* * Results: * Mount's root dentry structure on success * ERR_PTR()-encoded negative error code on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ struct dentry * HgfsMount(struct file_system_type *fs_type, // IN: file system type of mount int flags, // IN: mount flags const char *dev_name, // IN: device mounting on void *rawData) // IN: mount arguments { return mount_nodev(fs_type, flags, rawData, HgfsReadSuper); } #elif defined VMW_GETSB_2618 /* *----------------------------------------------------------------------------- * * HgfsGetSb -- * * Invokes generic kernel code to prepare superblock for * deviceless filesystem. * * Results: * 0 on success * non-zero on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ static int HgfsGetSb(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData, struct vfsmount *mnt) { return get_sb_nodev(fs_type, flags, rawData, HgfsReadSuper, mnt); } #else /* *----------------------------------------------------------------------------- * * HgfsGetSb -- * * Invokes generic kernel code to prepare superblock for * deviceless filesystem. * * Results: * The initialized superblock on success * NULL on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ static struct super_block * HgfsGetSb(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData) { return get_sb_nodev(fs_type, flags, rawData, HgfsReadSuper); } #endif /* *----------------------------------------------------------------------------- * * HgfsResetOps -- * * Reset ops with more than one opcode back to the desired opcode. * * Results: * None. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static void HgfsResetOps(void) { hgfsVersionOpen = HGFS_OP_OPEN_V3; hgfsVersionRead = HGFS_OP_READ_V3; hgfsVersionWrite = HGFS_OP_WRITE_V3; hgfsVersionClose = HGFS_OP_CLOSE_V3; hgfsVersionSearchOpen = HGFS_OP_SEARCH_OPEN_V3; hgfsVersionSearchRead = HGFS_OP_SEARCH_READ_V3; hgfsVersionSearchClose = HGFS_OP_SEARCH_CLOSE_V3; hgfsVersionGetattr = HGFS_OP_GETATTR_V3; hgfsVersionSetattr = HGFS_OP_SETATTR_V3; hgfsVersionCreateDir = HGFS_OP_CREATE_DIR_V3; hgfsVersionDeleteFile = HGFS_OP_DELETE_FILE_V3; hgfsVersionDeleteDir = HGFS_OP_DELETE_DIR_V3; hgfsVersionRename = HGFS_OP_RENAME_V3; hgfsVersionQueryVolumeInfo = HGFS_OP_QUERY_VOLUME_INFO_V3; hgfsVersionCreateSymlink = HGFS_OP_CREATE_SYMLINK_V3; } /* * Public function implementations. */ /* *----------------------------------------------------------------------------- * * HgfsInitFileSystem -- * * Initializes the file system and registers it with the kernel. * * Results: * TRUE on success, FALSE on failure. * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool HgfsInitFileSystem(void) { /* Initialize primitives. */ HgfsResetOps(); /* Setup the inode slab allocator. */ hgfsInodeCache = compat_kmem_cache_create("hgfsInodeCache", sizeof (HgfsInodeInfo), 0, SLAB_HWCACHE_ALIGN, HgfsInodeCacheCtor); if (hgfsInodeCache == NULL) { printk(KERN_WARNING "VMware hgfs: failed to create inode allocator\n"); return FALSE; } /* Initialize the transport. */ HgfsTransportInit(); /* * Register the filesystem. This should be the last thing we do * in init_module. 
*/ if (register_filesystem(&hgfsType)) { printk(KERN_WARNING "VMware hgfs: failed to register filesystem\n"); kmem_cache_destroy(hgfsInodeCache); return FALSE; } LOG(4, (KERN_DEBUG "VMware hgfs: Module Loaded\n")); return TRUE; } /* *----------------------------------------------------------------------------- * * HgfsCleanupFileSystem -- * * Cleans up file system and unregisters it with the kernel. * * Results: * TRUE on success, FALSE on failure. * * Side effects: * None. * *----------------------------------------------------------------------------- */ Bool HgfsCleanupFileSystem(void) { Bool success = TRUE; /* * Unregister the filesystem. This should be the first thing we do in * the module cleanup code. */ if (unregister_filesystem(&hgfsType)) { printk(KERN_WARNING "VMware hgfs: failed to unregister filesystem\n"); success = FALSE; } /* Transport cleanup. */ HgfsTransportExit(); /* Destroy the inode and request slabs. */ kmem_cache_destroy(hgfsInodeCache); LOG(4, (KERN_DEBUG "VMware hgfs: Module Unloaded\n")); return success; } vmhgfs-only/filesystem.h 0000444 0000000 0000000 00000002325 13432725306 014367 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * filesystem.h -- * * High-level filesystem operations for the filesystem portion of * the vmhgfs driver. */ #ifndef _HGFS_DRIVER_FILESYSTEM_H_ #define _HGFS_DRIVER_FILESYSTEM_H_ #include "vm_basic_types.h" /* Public functions (with respect to the entire module). */ Bool HgfsInitFileSystem(void); Bool HgfsCleanupFileSystem(void); #endif // _HGFS_DRIVER_FILESYSTEM_H_ vmhgfs-only/fsutil.h 0000444 0000000 0000000 00000011420 13432725306 013505 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * fsutil.h -- * * Functions used in more than one type of filesystem operation will be * exported from this file. */ #ifndef _HGFS_DRIVER_FSUTIL_H_ #define _HGFS_DRIVER_FSUTIL_H_ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/signal.h> #include "compat_fs.h" #include "module.h" /* For kuid_t kgid_t types. 
*/ #include "inode.h" #include "request.h" #include "vm_basic_types.h" #include "hgfsProto.h" /* * Struct used to pass around attributes that Linux cares about. * These aren't just the attributes seen in HgfsAttr[V2]; we add a filename * pointer for convenience (used by SearchRead and Getattr). */ typedef struct HgfsAttrInfo { HgfsOp requestType; HgfsAttrValid mask; HgfsFileType type; /* File type */ uint64 allocSize; /* Disk allocation size (in bytes) */ uint64 size; /* File size (in bytes) */ uint64 accessTime; /* Time of last access */ uint64 writeTime; /* Time of last write */ uint64 attrChangeTime; /* Time file attributes were last changed */ HgfsPermissions specialPerms; /* Special permissions bits */ HgfsPermissions ownerPerms; /* Owner permissions bits */ HgfsPermissions groupPerms; /* Group permissions bits */ HgfsPermissions otherPerms; /* Other permissions bits */ HgfsPermissions effectivePerms; /* Permissions in effect for the user on the host. */ uint32 userId; /* UID */ uint32 groupId; /* GID */ uint64 hostFileId; /* Inode number */ } HgfsAttrInfo; /* * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down * so that it will fit. * Note, this is taken from CIFS so we apply the same algorithm. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) /* * We use hash_64 to convert the value to 31 bits, and * then add 1, to ensure that we don't end up with a 0 as the value. */ #if BITS_PER_LONG == 64 static inline ino_t HgfsUniqueidToIno(uint64 fileid) { return (ino_t)fileid; } #else #include <linux/hash.h> static inline ino_t HgfsUniqueidToIno(uint64 fileid) { return (ino_t)hash_64(fileid, (sizeof(ino_t) * 8) - 1) + 1; } #endif #else static inline ino_t HgfsUniqueidToIno(uint64 fileid) { ino_t ino = (ino_t) fileid; if (sizeof(ino_t) < sizeof(uint64)) { ino ^= fileid >> (sizeof(uint64)-sizeof(ino_t)) * 8; } return ino; } #endif /* Public functions (with respect to the entire module). 
*/ int HgfsUnpackCommonAttr(HgfsReq *req, HgfsAttrInfo *attr); void HgfsChangeFileAttributes(struct inode *inode, HgfsAttrInfo const *attr); int HgfsPrivateGetattr(struct dentry *dentry, HgfsAttrInfo *attr, char **fileName); struct inode *HgfsIget(struct super_block *sb, ino_t ino, HgfsAttrInfo const *attr); int HgfsInstantiateRoot(struct super_block *sb, struct dentry **rootDentry); int HgfsInstantiate(struct dentry *dentry, ino_t ino, HgfsAttrInfo const *attr); int HgfsBuildPath(char *buffer, size_t bufferLen, struct dentry *dentry); void HgfsDentryAgeReset(struct dentry *dentry); void HgfsDentryAgeForce(struct dentry *dentry); int HgfsGetOpenMode(uint32 flags); int HgfsGetOpenFlags(uint32 flags); int HgfsCreateFileInfo(struct file *file, HgfsHandle handle); void HgfsReleaseFileInfo(struct file *file); int HgfsGetHandle(struct inode *inode, HgfsOpenMode mode, HgfsHandle *handle); int HgfsStatusConvertToLinux(HgfsStatus hgfsStatus); void HgfsSetUidGid(struct inode *parent, struct dentry *dentry, kuid_t uid, kgid_t gid); #endif // _HGFS_DRIVER_FSUTIL_H_ vmhgfs-only/backdoorGcc32.c 0000444 0000000 0000000 00000014043 13432725346 014550 0 ustar root root /********************************************************* * Copyright (C) 2005-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoorGcc32.c -- * * Implements the real work for guest-side backdoor for GCC, 32-bit * target (supports inline ASM, GAS syntax). The asm sections are marked * volatile since vmware can change the registers content without the * compiler knowing it. * * XXX * I tried to write this more cleanly, but: * - There is no way to specify an "ebp" constraint * - "ebp" is ignored when specified as cloberred register * - gas barfs when there is more than 10 operands * - gas 2.7.2.3, depending on the order of the operands, can * mis-assemble without any warning * --hpreg * * Note that the problems with gas noted above might longer be relevant * now that we've upgraded most of our compiler versions. * --rrdharan */ #ifdef __cplusplus extern "C" { #endif #include "backdoor.h" #include "backdoorInt.h" /* *---------------------------------------------------------------------------- * * Backdoor_InOut -- * * Send a low-bandwidth basic request (16 bytes) to vmware, and return its * reply (24 bytes). * * Results: * Host-side response returned in bp IN/OUT parameter. * * Side effects: * Pokes the backdoor. 
* *---------------------------------------------------------------------------- */ void Backdoor_InOut(Backdoor_proto *myBp) // IN/OUT { uint32 dummy; __asm__ __volatile__( #ifdef __PIC__ "pushl %%ebx" "\n\t" #endif "pushl %%eax" "\n\t" "movl 20(%%eax), %%edi" "\n\t" "movl 16(%%eax), %%esi" "\n\t" "movl 12(%%eax), %%edx" "\n\t" "movl 8(%%eax), %%ecx" "\n\t" "movl 4(%%eax), %%ebx" "\n\t" "movl (%%eax), %%eax" "\n\t" "inl %%dx, %%eax" "\n\t" "xchgl %%eax, (%%esp)" "\n\t" "movl %%edi, 20(%%eax)" "\n\t" "movl %%esi, 16(%%eax)" "\n\t" "movl %%edx, 12(%%eax)" "\n\t" "movl %%ecx, 8(%%eax)" "\n\t" "movl %%ebx, 4(%%eax)" "\n\t" "popl (%%eax)" "\n\t" #ifdef __PIC__ "popl %%ebx" "\n\t" #endif : "=a" (dummy) : "0" (myBp) /* * vmware can modify the whole VM state without the compiler knowing * it. So far it does not modify EFLAGS. --hpreg */ : #ifndef __PIC__ "ebx", #endif "ecx", "edx", "esi", "edi", "memory" ); } /* *----------------------------------------------------------------------------- * * BackdoorHbIn -- * BackdoorHbOut -- * * Send a high-bandwidth basic request to vmware, and return its * reply. * * Results: * Host-side response returned in bp IN/OUT parameter. * * Side-effects: * Pokes the high-bandwidth backdoor port. 
* *----------------------------------------------------------------------------- */ void BackdoorHbIn(Backdoor_proto_hb *myBp) // IN/OUT { uint32 dummy; __asm__ __volatile__( #ifdef __PIC__ "pushl %%ebx" "\n\t" #endif "pushl %%ebp" "\n\t" "pushl %%eax" "\n\t" "movl 24(%%eax), %%ebp" "\n\t" "movl 20(%%eax), %%edi" "\n\t" "movl 16(%%eax), %%esi" "\n\t" "movl 12(%%eax), %%edx" "\n\t" "movl 8(%%eax), %%ecx" "\n\t" "movl 4(%%eax), %%ebx" "\n\t" "movl (%%eax), %%eax" "\n\t" "cld" "\n\t" "rep; insb" "\n\t" "xchgl %%eax, (%%esp)" "\n\t" "movl %%ebp, 24(%%eax)" "\n\t" "movl %%edi, 20(%%eax)" "\n\t" "movl %%esi, 16(%%eax)" "\n\t" "movl %%edx, 12(%%eax)" "\n\t" "movl %%ecx, 8(%%eax)" "\n\t" "movl %%ebx, 4(%%eax)" "\n\t" "popl (%%eax)" "\n\t" "popl %%ebp" "\n\t" #ifdef __PIC__ "popl %%ebx" "\n\t" #endif : "=a" (dummy) : "0" (myBp) /* * vmware can modify the whole VM state without the compiler knowing * it. --hpreg */ : #ifndef __PIC__ "ebx", #endif "ecx", "edx", "esi", "edi", "memory", "cc" ); } void BackdoorHbOut(Backdoor_proto_hb *myBp) // IN/OUT { uint32 dummy; __asm__ __volatile__( #ifdef __PIC__ "pushl %%ebx" "\n\t" #endif "pushl %%ebp" "\n\t" "pushl %%eax" "\n\t" "movl 24(%%eax), %%ebp" "\n\t" "movl 20(%%eax), %%edi" "\n\t" "movl 16(%%eax), %%esi" "\n\t" "movl 12(%%eax), %%edx" "\n\t" "movl 8(%%eax), %%ecx" "\n\t" "movl 4(%%eax), %%ebx" "\n\t" "movl (%%eax), %%eax" "\n\t" "cld" "\n\t" "rep; outsb" "\n\t" "xchgl %%eax, (%%esp)" "\n\t" "movl %%ebp, 24(%%eax)" "\n\t" "movl %%edi, 20(%%eax)" "\n\t" "movl %%esi, 16(%%eax)" "\n\t" "movl %%edx, 12(%%eax)" "\n\t" "movl %%ecx, 8(%%eax)" "\n\t" "movl %%ebx, 4(%%eax)" "\n\t" "popl (%%eax)" "\n\t" "popl %%ebp" "\n\t" #ifdef __PIC__ "popl %%ebx" "\n\t" #endif : "=a" (dummy) : "0" (myBp) : #ifndef __PIC__ "ebx", #endif "ecx", "edx", "esi", "edi", "memory", "cc" ); } #ifdef __cplusplus } #endif vmhgfs-only/super.c 0000444 0000000 0000000 00000025532 13432725306 013341 0 ustar root root 
/********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * super.c -- * * Superblock operations for the filesystem portion of the vmhgfs driver. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/vfs.h> #include "compat_fs.h" #include "compat_statfs.h" #include "compat_kernel.h" #include "compat_slab.h" #include "compat_sched.h" #include "compat_version.h" #include "hgfsProto.h" #include "escBitvector.h" #include "cpName.h" #include "hgfsUtil.h" #include "request.h" #include "fsutil.h" #include "hgfsDevLinux.h" #include "module.h" #include "vm_assert.h" /* Hgfs filesystem superblock operations */ static struct inode *HgfsAllocInode(struct super_block *sb); static void HgfsDestroyInode(struct inode *inode); static void HgfsPutSuper(struct super_block *sb); #if defined VMW_STATFS_2618 static int HgfsStatfs(struct dentry *dentry, struct compat_kstatfs *stat); #else static int HgfsStatfs(struct super_block *sb, struct compat_kstatfs *stat); #endif struct super_operations HgfsSuperOperations = { .alloc_inode = HgfsAllocInode, .destroy_inode = HgfsDestroyInode, .put_super = HgfsPutSuper, .statfs = HgfsStatfs, }; /* 
*----------------------------------------------------------------------------- * * HgfsAllocInode -- * * Hgfs superblock 'alloc_inode' method. Called by the kernel to allocate * a new inode struct. We use this VFS method instead of read_inode because * we want to control both how we allocate and how we fill in the inode. * * Results: * Non-null: A valid inode. * null: Error in inode allocation. * * Side effects: * Allocates memory. * *----------------------------------------------------------------------------- */ static struct inode * HgfsAllocInode(struct super_block *sb) // IN: Superblock for the inode { HgfsInodeInfo *iinfo; iinfo = kmem_cache_alloc(hgfsInodeCache, GFP_KERNEL); if (!iinfo) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsAllocInode: " "can't allocate memory\n")); return NULL; } return &iinfo->inode; } /* *----------------------------------------------------------------------------- * * HgfsDestroyInode -- * * Hgfs superblock 'destroy_inode' method. Called by the kernel when it * deallocates an inode. We use this method instead of clear_inode because * we want to control both how we deallocate and how we clear the inode. * * Results: * None * * Side effects: * Frees memory associated with inode. * *----------------------------------------------------------------------------- */ static void HgfsDestroyInode(struct inode *inode) // IN: The VFS inode { kmem_cache_free(hgfsInodeCache, INODE_GET_II_P(inode)); } /* *----------------------------------------------------------------------------- * * HgfsPutSuper -- * * Hgfs superblock 'put_super' method. Called after a umount(2) of the * filesystem succeeds. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static void HgfsPutSuper(struct super_block *sb) // IN: The superblock { HgfsSuperInfo *si; ASSERT(sb); LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPutSuper: was called\n")); si = HGFS_SB_TO_COMMON(sb); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) bdi_destroy(&si->bdi); #endif kfree(si->shareName); kfree(si); } /* *---------------------------------------------------------------------- * * HgfsPackQueryVolumeRequest -- * * Setup the query volume request, depending on the op version. * * Results: * Returns zero on success, or negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPackQueryVolumeRequest(struct dentry *dentry, // IN: File pointer for this open HgfsOp opUsed, // IN: Op to be used. HgfsReq *req) // IN/OUT: Packet to write into { char *name; uint32 *nameLength; size_t requestSize; int result; ASSERT(dentry); ASSERT(req); switch (opUsed) { case HGFS_OP_QUERY_VOLUME_INFO_V3: { HgfsRequest *requestHeader; HgfsRequestQueryVolumeV3 *requestV3; requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); requestHeader->op = opUsed; requestHeader->id = req->id; requestV3 = (HgfsRequestQueryVolumeV3 *)HGFS_REQ_PAYLOAD_V3(req); /* We'll use these later. */ name = requestV3->fileName.name; nameLength = &requestV3->fileName.length; requestV3->fileName.flags = 0; requestV3->fileName.fid = HGFS_INVALID_HANDLE; requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; requestV3->reserved = 0; requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); break; } case HGFS_OP_QUERY_VOLUME_INFO: { HgfsRequestQueryVolume *request; request = (HgfsRequestQueryVolume *)(HGFS_REQ_PAYLOAD(req)); request->header.op = opUsed; request->header.id = req->id; /* We'll use these later. 
*/ name = request->fileName.name; nameLength = &request->fileName.length; requestSize = sizeof *request; break; } default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackQueryVolumeRequest: unexpected " "OP type encountered\n")); return -EPROTO; } /* Build full name to send to server. */ if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1), dentry) < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackQueryVolumeRequest: build path failed\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackQueryVolumeRequest: opening \"%s\"\n", name)); /* Convert to CP name. */ result = CPName_ConvertTo(name, req->bufferSize - (requestSize - 1), name); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackQueryVolumeRequest: CP conversion failed\n")); return -EINVAL; } *nameLength = (uint32) result; req->payloadSize = requestSize + result; return 0; } /* *----------------------------------------------------------------------------- * * HgfsStatfs -- * * Hgfs superblock 'statfs' method. Called when statfs(2) is invoked on the * filesystem. 
* * Results: * 0 on success * error < 0 on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined VMW_STATFS_2618 static int HgfsStatfs(struct dentry *dentry, // IN : The directory entry struct compat_kstatfs *stat) // OUT: Stat to fill in #else static int HgfsStatfs(struct super_block *sb, // IN : The superblock struct compat_kstatfs *stat) // OUT: Stat to fill in #endif { HgfsReq *req; int result = 0; struct dentry *dentryToUse; struct super_block *sbToUse; HgfsOp opUsed; HgfsStatus replyStatus; uint64 freeBytes; uint64 totalBytes; ASSERT(stat); #if defined VMW_STATFS_2618 ASSERT(dentry); ASSERT(dentry->d_sb); dentryToUse = dentry; sbToUse = dentry->d_sb; #else ASSERT(sb); ASSERT(sb->s_root); dentryToUse = sb->s_root; sbToUse = sb; #endif LOG(6, (KERN_DEBUG "VMware hgfs: HgfsStatfs: was called\n")); memset(stat, 0, sizeof *stat); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionQueryVolumeInfo; result = HgfsPackQueryVolumeRequest(dentryToUse, opUsed, req); if (result != 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: error packing request\n")); goto out; } result = HgfsSendRequest(req); if (result == 0) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsStatfs: got reply\n")); replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); /* * If the statfs succeeded on the server, copy the stats * into the kstatfs struct, otherwise return an error. 
*/ switch (result) { case 0: stat->f_type = HGFS_SUPER_MAGIC; stat->f_bsize = sbToUse->s_blocksize; stat->f_namelen = PATH_MAX; if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) { totalBytes = ((HgfsReplyQueryVolumeV3 *)HGFS_REP_PAYLOAD_V3(req))->totalBytes; freeBytes = ((HgfsReplyQueryVolumeV3 *)HGFS_REP_PAYLOAD_V3(req))->freeBytes; } else { totalBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->totalBytes; freeBytes = ((HgfsReplyQueryVolume *)HGFS_REQ_PAYLOAD(req))->freeBytes; } stat->f_blocks = totalBytes >> sbToUse->s_blocksize_bits; stat->f_bfree = freeBytes >> sbToUse->s_blocksize_bits; stat->f_bavail = stat->f_bfree; break; case -EPERM: /* * We're cheating! This will cause statfs will return success. * We're doing this because an old server will complain when it gets * a statfs on a per-share mount. Rather than have 'df' spit an * error, let's just return all zeroes. */ result = 0; break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_QUERY_VOLUME_INFO_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionQueryVolumeInfo = HGFS_OP_QUERY_VOLUME_INFO; goto retry; } break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: server returned error: " "%d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsStatfs: unknown error: %d\n", result)); } out: HgfsFreeRequest(req); return result; } vmhgfs-only/rpcout.h 0000444 0000000 0000000 00000004731 13432725330 013517 0 ustar root root /********************************************************* * Copyright (C) 2007-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * rpcout.h -- * * Remote Procedure Call between VMware and guest applications * C declarations */ #ifndef __RPCOUT_H__ # define __RPCOUT_H__ #include "vm_basic_types.h" #define RPCI_PROTOCOL_NUM 0x49435052 /* 'RPCI' ;-) */ typedef struct RpcOut RpcOut; RpcOut *RpcOut_Construct(void); void RpcOut_Destruct(RpcOut *out); Bool RpcOut_start(RpcOut *out); Bool RpcOut_send(RpcOut *out, char const *request, size_t reqLen, Bool *rpcStatus, char const **reply, size_t *repLen); Bool RpcOut_stop(RpcOut *out); /* * This is the only method needed to send a message to vmware for * 99% of uses. I'm leaving the others defined here so people know * they can be exported again if the need arises. [greg] */ Bool RpcOut_sendOne(char **reply, size_t *repLen, char const *reqFmt, ...); /* * A version of the RpcOut_sendOne function that works with UTF-8 * strings and other data that would be corrupted by Win32's * FormatMessage function (which is used by RpcOut_sendOne()). */ Bool RpcOut_SendOneRaw(void *request, size_t reqLen, char **reply, size_t *repLen); /* * A variant of the RpcOut_SendOneRaw in which the caller supplies the * receive buffer so as to avoid the need to call malloc internally. * Useful in situations where calling malloc is not allowed. */ Bool RpcOut_SendOneRawPreallocated(void *request, size_t reqLen, char *reply, size_t repLen); /* * As the above but must be run by admin/root to make the privileged * RPC call successfully. 
*/ Bool RpcOut_SendOneRawPriv(void *request, size_t reqLen, char **reply, size_t *repLen); #endif /* __RPCOUT_H__ */ vmhgfs-only/vmhgfs_version.h 0000444 0000000 0000000 00000002230 13432725306 015235 0 ustar root root /********************************************************* * Copyright (C) 2007-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmhgfs_version.h -- * * Version definitions for the Linux vmhgfs driver. */ #ifndef _VMHGFS_VERSION_H_ #define _VMHGFS_VERSION_H_ #define VMHGFS_DRIVER_VERSION 2.0.21.0 #define VMHGFS_DRIVER_VERSION_COMMAS 2,0,21,0 #define VMHGFS_DRIVER_VERSION_STRING "2.0.21.0" #endif /* _VMHGFS_VERSION_H_ */ vmhgfs-only/backdoorInt.h 0000444 0000000 0000000 00000002015 13432725346 014442 0 ustar root root /********************************************************* * Copyright (C) 2005-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoorInt.h -- * * Internal function prototypes for the real backdoor work. */ void BackdoorHbIn(Backdoor_proto_hb *bp); void BackdoorHbOut(Backdoor_proto_hb *bp); vmhgfs-only/backdoor.c 0000444 0000000 0000000 00000016231 13432725346 013767 0 ustar root root /********************************************************* * Copyright (C) 1999-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoor.c -- * * First layer of the internal communication channel between guest * applications and vmware * * This is the backdoor. By using special ports of the virtual I/O space, * and the virtual CPU registers, a guest application can send a * synchroneous basic request to vmware, and vmware can reply to it. */ #ifdef __cplusplus extern "C" { #endif #include "backdoor_def.h" #include "backdoor.h" #include "backdoorInt.h" #ifdef USE_VALGRIND /* * When running under valgrind, we need to ensure we have the correct register * state when poking the backdoor. 
The VALGRIND_NON_SIMD_CALLx macros are used * to escape from the valgrind emulated CPU to the physical CPU. */ #include "vm_valgrind.h" #endif #if defined(BACKDOOR_DEBUG) && defined(USERLEVEL) #if defined(__KERNEL__) || defined(_KERNEL) #else # include "debug.h" #endif # include <stdio.h> # define BACKDOOR_LOG(args) Debug args # define BACKDOOR_LOG_PROTO_STRUCT(x) BackdoorPrintProtoStruct((x)) # define BACKDOOR_LOG_HB_PROTO_STRUCT(x) BackdoorPrintHbProtoStruct((x)) /* *---------------------------------------------------------------------------- * * BackdoorPrintProtoStruct -- * BackdoorPrintHbProtoStruct -- * * Print the contents of the specified backdoor protocol structure via * printf. * * Results: * None. * * Side effects: * Output to stdout. * *---------------------------------------------------------------------------- */ void BackdoorPrintProtoStruct(Backdoor_proto *myBp) { Debug("magic 0x%08x, command %d, size %"FMTSZ"u, port %d\n", myBp->in.ax.word, myBp->in.cx.halfs.low, myBp->in.size, myBp->in.dx.halfs.low); #ifndef VM_X86_64 Debug("ax %#x, " "bx %#x, " "cx %#x, " "dx %#x, " "si %#x, " "di %#x\n", myBp->out.ax.word, myBp->out.bx.word, myBp->out.cx.word, myBp->out.dx.word, myBp->out.si.word, myBp->out.di.word); #else Debug("ax %#"FMT64"x, " "bx %#"FMT64"x, " "cx %#"FMT64"x, " "dx %#"FMT64"x, " "si %#"FMT64"x, " "di %#"FMT64"x\n", myBp->out.ax.quad, myBp->out.bx.quad, myBp->out.cx.quad, myBp->out.dx.quad, myBp->out.si.quad, myBp->out.di.quad); #endif } void BackdoorPrintHbProtoStruct(Backdoor_proto_hb *myBp) { Debug("magic 0x%08x, command %d, size %"FMTSZ"u, port %d, " "srcAddr %"FMTSZ"u, dstAddr %"FMTSZ"u\n", myBp->in.ax.word, myBp->in.bx.halfs.low, myBp->in.size, myBp->in.dx.halfs.low, myBp->in.srcAddr, myBp->in.dstAddr); #ifndef VM_X86_64 Debug("ax %#x, " "bx %#x, " "cx %#x, " "dx %#x, " "si %#x, " "di %#x, " "bp %#x\n", myBp->out.ax.word, myBp->out.bx.word, myBp->out.cx.word, myBp->out.dx.word, myBp->out.si.word, myBp->out.di.word, myBp->out.bp.word); 
#else Debug("ax %#"FMT64"x, " "bx %#"FMT64"x, " "cx %#"FMT64"x, " "dx %#"FMT64"x, " "si %#"FMT64"x, " "di %#"FMT64"x, " "bp %#"FMT64"x\n", myBp->out.ax.quad, myBp->out.bx.quad, myBp->out.cx.quad, myBp->out.dx.quad, myBp->out.si.quad, myBp->out.di.quad, myBp->out.bp.quad); #endif } #else # define BACKDOOR_LOG(args) # define BACKDOOR_LOG_PROTO_STRUCT(x) # define BACKDOOR_LOG_HB_PROTO_STRUCT(x) #endif /* *----------------------------------------------------------------------------- * * Backdoor -- * * Send a low-bandwidth basic request (16 bytes) to vmware, and return its * reply (24 bytes). * * Result: * None * * Side-effects: * None * *----------------------------------------------------------------------------- */ #ifdef USE_VALGRIND static void Backdoor_InOutValgrind(uint16 tid, Backdoor_proto *myBp) { Backdoor_InOut(myBp); } #endif void Backdoor(Backdoor_proto *myBp) // IN/OUT { ASSERT(myBp); myBp->in.ax.word = BDOOR_MAGIC; myBp->in.dx.halfs.low = BDOOR_PORT; BACKDOOR_LOG(("Backdoor: before ")); BACKDOOR_LOG_PROTO_STRUCT(myBp); #ifdef USE_VALGRIND VALGRIND_NON_SIMD_CALL1(Backdoor_InOutValgrind, myBp); #else Backdoor_InOut(myBp); #endif BACKDOOR_LOG(("Backdoor: after ")); BACKDOOR_LOG_PROTO_STRUCT(myBp); } /* *----------------------------------------------------------------------------- * * Backdoor_HbOut -- * * Send a high-bandwidth basic request to vmware, and return its * reply. * * Result: * The host-side response is returned via the IN/OUT parameter. * * Side-effects: * Pokes the high-bandwidth backdoor. 
* *----------------------------------------------------------------------------- */ #ifdef USE_VALGRIND static void BackdoorHbOutValgrind(uint16 tid, Backdoor_proto_hb *myBp) { BackdoorHbOut(myBp); } #endif void Backdoor_HbOut(Backdoor_proto_hb *myBp) // IN/OUT { ASSERT(myBp); myBp->in.ax.word = BDOOR_MAGIC; myBp->in.dx.halfs.low = BDOORHB_PORT; BACKDOOR_LOG(("Backdoor_HbOut: before ")); BACKDOOR_LOG_HB_PROTO_STRUCT(myBp); #ifdef USE_VALGRIND VALGRIND_NON_SIMD_CALL1(BackdoorHbOutValgrind, myBp); #else BackdoorHbOut(myBp); #endif BACKDOOR_LOG(("Backdoor_HbOut: after ")); BACKDOOR_LOG_HB_PROTO_STRUCT(myBp); } /* *----------------------------------------------------------------------------- * * Backdoor_HbIn -- * * Send a basic request to vmware, and return its high-bandwidth * reply * * Result: * Host-side response returned via the IN/OUT parameter. * * Side-effects: * Pokes the high-bandwidth backdoor. * *----------------------------------------------------------------------------- */ #ifdef USE_VALGRIND static void BackdoorHbInValgrind(uint16 tid, Backdoor_proto_hb *myBp) { BackdoorHbIn(myBp); } #endif void Backdoor_HbIn(Backdoor_proto_hb *myBp) // IN/OUT { ASSERT(myBp); myBp->in.ax.word = BDOOR_MAGIC; myBp->in.dx.halfs.low = BDOORHB_PORT; BACKDOOR_LOG(("Backdoor_HbIn: before ")); BACKDOOR_LOG_HB_PROTO_STRUCT(myBp); #ifdef USE_VALGRIND VALGRIND_NON_SIMD_CALL1(BackdoorHbInValgrind, myBp); #else BackdoorHbIn(myBp); #endif BACKDOOR_LOG(("Backdoor_HbIn: after ")); BACKDOOR_LOG_HB_PROTO_STRUCT(myBp); } #ifdef __cplusplus } #endif vmhgfs-only/bdhandler.c 0000444 0000000 0000000 00000013651 13432725306 014125 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * bdhandler.c -- * * Background thread for handling backdoor requests and replies. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/errno.h> #include "transport.h" #include "hgfsBd.h" #include "hgfsDevLinux.h" #include "hgfsProto.h" #include "module.h" #include "request.h" #include "rpcout.h" #include "vm_assert.h" static Bool HgfsBdChannelOpen(HgfsTransportChannel *channel); static void HgfsBdChannelClose(HgfsTransportChannel *channel); static HgfsReq * HgfsBdChannelAllocate(size_t payloadSize); void HgfsBdChannelFree(HgfsReq *req); static int HgfsBdChannelSend(HgfsTransportChannel *channel, HgfsReq *req); static HgfsTransportChannel channel = { .name = "backdoor", .ops.open = HgfsBdChannelOpen, .ops.close = HgfsBdChannelClose, .ops.allocate = HgfsBdChannelAllocate, .ops.free = HgfsBdChannelFree, .ops.send = HgfsBdChannelSend, .priv = NULL, .status = HGFS_CHANNEL_NOTCONNECTED }; /* *----------------------------------------------------------------------------- * * HgfsBdChannelOpen -- * * Open the backdoor in an idempotent way. * * Results: * TRUE on success, FALSE on failure. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static Bool HgfsBdChannelOpen(HgfsTransportChannel *channel) // IN: Channel { Bool ret = FALSE; ASSERT(channel->status == HGFS_CHANNEL_NOTCONNECTED); if (HgfsBd_OpenBackdoor((RpcOut **)&channel->priv)) { LOG(8, ("VMware hgfs: %s: backdoor opened.\n", __func__)); ret = TRUE; ASSERT(channel->priv != NULL); } return ret; } /* *----------------------------------------------------------------------------- * * HgfsBdChannelClose -- * * Close the backdoor in an idempotent way. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static void HgfsBdChannelClose(HgfsTransportChannel *channel) // IN: Channel { ASSERT(channel->priv != NULL); HgfsBd_CloseBackdoor((RpcOut **)&channel->priv); ASSERT(channel->priv == NULL); LOG(8, ("VMware hgfs: %s: backdoor closed.\n", __func__)); } /* *----------------------------------------------------------------------------- * * HgfsBdChannelAllocate -- * * Allocate request in a way that is suitable for sending through * backdoor. * * Results: * NULL on failure; otherwise address of the new request. * * Side effects: * None * *----------------------------------------------------------------------------- */ static HgfsReq * HgfsBdChannelAllocate(size_t payloadSize) // IN: size of requests payload { HgfsReq *req; req = kmalloc(sizeof(*req) + HGFS_SYNC_REQREP_CLIENT_CMD_LEN + payloadSize, GFP_KERNEL); if (likely(req)) { /* Setup the packet prefix. */ memcpy(req->buffer, HGFS_SYNC_REQREP_CLIENT_CMD, HGFS_SYNC_REQREP_CLIENT_CMD_LEN); req->payload = req->buffer + HGFS_SYNC_REQREP_CLIENT_CMD_LEN; req->bufferSize = payloadSize; } return req; } /* *----------------------------------------------------------------------------- * * HgfsBdChannelFree -- * * Free previously allocated request. * * Results: * None. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ void HgfsBdChannelFree(HgfsReq *req) { ASSERT(req); kfree(req); } /* *---------------------------------------------------------------------- * * HgfsBdChannelSend -- * * Send a request via backdoor. * * Results: * 0 on success, negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsBdChannelSend(HgfsTransportChannel *channel, // IN: Channel HgfsReq *req) // IN: request to send { char const *replyPacket = NULL; size_t payloadSize; int ret; ASSERT(req); ASSERT(req->state == HGFS_REQ_STATE_UNSENT); ASSERT(req->payloadSize <= req->bufferSize); LOG(8, ("VMware hgfs: %s: backdoor sending.\n", __func__)); payloadSize = req->payloadSize; ret = HgfsBd_Dispatch(channel->priv, HGFS_REQ_PAYLOAD(req), &payloadSize, &replyPacket); if (ret == 0) { LOG(8, ("VMware hgfs: %s: Backdoor reply received.\n", __func__)); /* Request sent successfully. Copy the reply and wake the client. */ ASSERT(replyPacket); ASSERT(payloadSize <= req->bufferSize); memcpy(HGFS_REQ_PAYLOAD(req), replyPacket, payloadSize); req->payloadSize = payloadSize; HgfsCompleteReq(req); } return ret; } /* *---------------------------------------------------------------------- * * HgfsGetBdChannel -- * * Initialize backdoor channel. * * Results: * Always return pointer to back door channel. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsTransportChannel* HgfsGetBdChannel(void) { return &channel; } vmhgfs-only/hgfsEscape.c 0000444 0000000 0000000 00000075421 13432725346 014261 0 ustar root root /********************************************************* * Copyright (C) 2008-2018 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsEscape.c -- * * Escape and unescape illegal filenames for different platforms. * */ #ifdef __KERNEL__ # include "driver-config.h" # include <linux/string.h> #elif defined __FreeBSD__ # if defined _KERNEL # include <sys/libkern.h> # define strchr(s,c) index(s,c) # else # include <string.h> # endif # define memmove(s1,s2,n) bcopy(s2,s1,n) #elif defined __APPLE__ && defined KERNEL # include <string.h> #elif !defined sun # include <stdlib.h> # include <string.h> #else # include <string.h> #endif #include "vmware.h" #include "hgfsEscape.h" #include "cpName.h" #ifdef _WIN32 #define UNREFERENCED_PARAMETER(P) (P) /* These characters are illegal in Windows file names. */ const char* HGFS_ILLEGAL_CHARS = "/\\*?:\"<>|"; const char* HGFS_SUBSTITUTE_CHARS = "!@#$^&(){"; /* Last character of a file name in Windows can be neither dot nor space. */ const char* HGFS_ILLEGAL_LAST_CHARS = ". 
"; /* http://msdn.microsoft.com/en-us/library/aa365247.aspx */ char *HgfsReservedNames[] = {"CON", "PRN", "AUX", "NUL"}; char *HgfsReservedNamesWithNumber[] = {"COM", "LPT"}; #define HGFS_RESERVED_NAME_CHARS_LENGTH 3 #define HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH (HGFS_RESERVED_NAME_CHARS_LENGTH + 1) /* Check for special escaping cases - reserved names and illegal last characters. */ #define IS_SPECIAL_CASE_ESCAPE(b,o,l) HgfsIsSpecialCaseEscape(b,o,l) /* Process Windows reserved names. */ #define PROCESS_RESERVED_NAME(b,s,p,o,c) \ if (!HgfsProcessReservedName(b,s,p,o,c)) \ { \ return FALSE; \ } /* Process Windows reserved names. */ #define PROCESS_LAST_CHARACTER(b,s,p,c) \ if (!HgfsProcessLastCharacter(b,s,p,c)) \ { \ return FALSE; \ } #else // _WIN32 #define UNREFERENCED_PARAMETER(P) /* There is no special escape sequences on other than Windows platforms. */ #define IS_SPECIAL_CASE_ESCAPE(b,o,l) FALSE /* There is no reserved names on other then Windows platforms. */ #define PROCESS_RESERVED_NAME(b,s,p,o,c) /* There is no special processing for the last character on non-Windows platforms. */ #define PROCESS_LAST_CHARACTER(b,s,p,c) #if defined __APPLE__ /* These characters are illegal in MAC OS file names. */ const char* HGFS_ILLEGAL_CHARS = "/:"; const char* HGFS_SUBSTITUTE_CHARS = "!&"; #else // __APPLE__ /* These characters are illegal in Linux file names. 
*/ const char* HGFS_ILLEGAL_CHARS = "/"; const char* HGFS_SUBSTITUTE_CHARS = "!"; #endif // __APPLE__ #endif // _WIN32 #define HGFS_ESCAPE_CHAR '%' #define HGFS_ESCAPE_SUBSTITUE_CHAR ']' typedef enum { HGFS_ESCAPE_ILLEGAL_CHARACTER, HGFS_ESCAPE_RESERVED_NAME, HGFS_ESCAPE_ILLEGAL_LAST_CHARACTER, HGFS_ESCAPE_ESCAPE_SEQUENCE, HGFS_ESCAPE_COMPLETE } HgfsEscapeReason; typedef Bool (*HgfsEnumCallback)(char const *bufIn, uint32 offset, HgfsEscapeReason reason, void* context); /* * The structure is used by HgfsAddEscapeCharacter to keep context information between * invocations * All offsets defined in this structure are in characters, not bytes */ typedef struct { uint32 processedOffset; // Offset of the first unprocessed input character uint32 outputBufferLength; // Number of characters in the output buffer uint32 outputOffset; // Number of characters that are already in the output char *outputBuffer; // Pointer to the output buffer } HgfsEscapeContext; static void HgfsEscapeUndoComponent(char *bufIn, uint32 *totalLength); static int HgfsEscapeGetComponentSize(char const *bufIn, uint32 sizeIn); static int HgfsEscapeDoComponent(char const *bufIn, uint32 sizeIn, uint32 sizeBufOut, char *bufOut); /* *----------------------------------------------------------------------------- * * HgfsAddEscapeCharacter -- * * Callback function that is called by HgfsEnumerate to insert an escape sequence * into the input name. * * Results: * TRUE if successful, FALSE if there is an error like the output buffer is * too small. * * Side effects: * Updates the output buffer pointer (stored in the context variable). 
* *----------------------------------------------------------------------------- */ static Bool HgfsAddEscapeCharacter(char const * bufIn, // IN: input name uint32 offset, // IN: offset that requires escaping HgfsEscapeReason reason, // IN: reason for esaping void *context) // IN/OUT: convertion context { HgfsEscapeContext *escapeContext = (HgfsEscapeContext *)context; uint32 charactersToCopy; uint32 outputSpace; char* illegal; Bool result = TRUE; ASSERT(offset >= escapeContext->processedOffset); // Scanning forward charactersToCopy = offset - escapeContext->processedOffset; if (escapeContext->outputOffset + charactersToCopy > escapeContext->outputBufferLength) { return FALSE; } memcpy(escapeContext->outputBuffer + escapeContext->outputOffset, bufIn + escapeContext->processedOffset, charactersToCopy * sizeof *bufIn); escapeContext->outputOffset += charactersToCopy; escapeContext->processedOffset += charactersToCopy; outputSpace = escapeContext->outputBufferLength - escapeContext->outputOffset; switch(reason) { case HGFS_ESCAPE_ILLEGAL_CHARACTER: if (outputSpace < 2) { return FALSE; } illegal = strchr(HGFS_ILLEGAL_CHARS, bufIn[escapeContext->processedOffset]); escapeContext->processedOffset++; // Skip illegal input character ASSERT(illegal != NULL); escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_SUBSTITUTE_CHARS[illegal - HGFS_ILLEGAL_CHARS]; escapeContext->outputOffset++; escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_ESCAPE_CHAR; escapeContext->outputOffset++; break; case HGFS_ESCAPE_RESERVED_NAME: if (outputSpace < 1) { return FALSE; } escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_ESCAPE_CHAR; escapeContext->outputOffset++; break; case HGFS_ESCAPE_ILLEGAL_LAST_CHARACTER: if (outputSpace < 1) { return FALSE; } escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_ESCAPE_CHAR; escapeContext->outputOffset++; break; case HGFS_ESCAPE_ESCAPE_SEQUENCE: if (outputSpace < 2) { return FALSE; } 
escapeContext->processedOffset++; // Skip input esape character escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_ESCAPE_SUBSTITUE_CHAR; escapeContext->outputOffset++; escapeContext->outputBuffer[escapeContext->outputOffset] = HGFS_ESCAPE_CHAR; escapeContext->outputOffset++; break; case HGFS_ESCAPE_COMPLETE: if (outputSpace < 1) { return FALSE; } escapeContext->outputBuffer[escapeContext->outputOffset] = '\0'; break; default: result = FALSE; ASSERT(FALSE); } return result; } /* *----------------------------------------------------------------------------- * * HgfsCountEscapeChars -- * * Callback function that is called by HgfsEnumerate to count additional characters * that need to be inserted in the input name. * * Results: * TRUE since it never fails. * * Side effects: * None * *----------------------------------------------------------------------------- */ static Bool HgfsCountEscapeChars(char const *bufIn, // IN: input name uint32 offset, // IN: offset where escape is needed HgfsEscapeReason reason, // IN: reason for escaping void *context) // IN/OUT: context info { UNREFERENCED_PARAMETER(bufIn); UNREFERENCED_PARAMETER(offset); if (reason != HGFS_ESCAPE_COMPLETE) { uint32 *counter = (uint32*)context; (*counter)++; } return TRUE; } #ifdef _WIN32 /* *----------------------------------------------------------------------------- * * HgfsLetterToUpper -- * * Converts lowercase English letters to uppercase. * If the symbol is not a lowercase English letter returns the original character. * * Results: * Converted character. * * Side effects: * None * *----------------------------------------------------------------------------- */ static char HgfsLetterToUpper(char letter) { if (letter >= 'a' && letter <= 'z') { return letter - ('a' - 'A'); } return letter; } /* *----------------------------------------------------------------------------- * * HgfsIsEqualPrefix -- * * Verifies if the string prefix is equal to the given prefix. 
* It assumes that the prefix includes only uppercase English letters or numbers * and it does not have any international characters. * The string must be either NULL terminated or not shorter then the prefix. * * Results: * TRUE if the uppcased string starts with the given prefix. False otherwise. * * Side effects: * None * *----------------------------------------------------------------------------- */ static Bool HgfsIsEqualPrefix(char const *prefix, // IN: prefix to check char const *string, // IN: input string uint32 prefixLength) // IN: length of the prefix in characters { int i; for (i = 0; i < prefixLength; i++) { ASSERT(prefix[i] > 0 && (prefix[i] < 'a' || prefix[i] > 'z' )); if (prefix[i] != HgfsLetterToUpper(string[i])) { return FALSE; } } return TRUE; } /* *----------------------------------------------------------------------------- * * HgfsIsReservedPrefix -- * * Verifies if the name's prefix is one of the reserved names. * * Results: * TRUE if the name's prefix is one of the reserved names. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static Bool HgfsIsReservedPrefix(char const *bufIn) // IN: input name { uint32 i; for (i = 0; i < ARRAYSIZE(HgfsReservedNames); i++) { if (HgfsIsEqualPrefix(HgfsReservedNames[i], bufIn, HGFS_RESERVED_NAME_CHARS_LENGTH)) { return TRUE; } } return FALSE; } /* *----------------------------------------------------------------------------- * * HgfsIsReservedPrefixWithNumber -- * * Verifies if the name's prefix is one of the reserved names with number: * COM1-9 or LPT1-9. * * Results: * TRUE if the name's prefix is one of the reserved names with number. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static Bool HgfsIsReservedPrefixWithNumber(char const *bufIn) // IN: input name { uint32 i; for (i = 0; i < ARRAYSIZE(HgfsReservedNamesWithNumber); i++) { if (HgfsIsEqualPrefix(HgfsReservedNamesWithNumber[i], bufIn, HGFS_RESERVED_NAME_CHARS_LENGTH) && bufIn[HGFS_RESERVED_NAME_CHARS_LENGTH] >= '1' && bufIn[HGFS_RESERVED_NAME_CHARS_LENGTH] <= '9') { return TRUE; } } return FALSE; } /* *----------------------------------------------------------------------------- * * HgfsIsSpecialCaseEscape -- * * Verifies if the escape character is a part of special case escape sequence * that exists only in Windows - escaped reserved name or escaped illegal last * character. * * Results: * TRUE if the name's prefix is one of the reserved names with number. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static Bool HgfsIsSpecialCaseEscape(char const *bufIn, // IN: input name uint32 offset, // IN: offset of the escape character uint32 length) // IN: length of the name in characters { if (offset + 1 == length && strchr(HGFS_ILLEGAL_LAST_CHARS, bufIn[offset - 1]) != NULL) { return TRUE; } if (offset == HGFS_RESERVED_NAME_CHARS_LENGTH && (length == HGFS_RESERVED_NAME_CHARS_LENGTH + 1 || bufIn[offset+1] == '.')) { return HgfsIsReservedPrefix(bufIn); } if (offset == HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH && (length == HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH + 1 || bufIn[offset+1] == '.')) { return HgfsIsReservedPrefixWithNumber(bufIn); } return FALSE; } /* *----------------------------------------------------------------------------- * * HgfsProcessReservedName -- * * Verifies if the name is one of reserved Windows file names. * If it is a reserved name invokes callback that performs required * processing. * * Results: * TRUE if no processing is required of if processing succeeded, * FALSE if processing failed. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static Bool HgfsProcessReservedName(char const *bufIn, // IN: Unescaped input buffer uint32 sizeIn, // IN: Length of the input HgfsEnumCallback processEscape, // IN: Callack that is invoked // if input is reserved name uint32 *offset, // OUT: New offset in the input void *context) // IN/OUT: Context for callback { /* Look reserved names: CON, PRN, AUX, NUL. */ if (sizeIn >= HGFS_RESERVED_NAME_CHARS_LENGTH && HgfsIsReservedPrefix(bufIn)) { if (HGFS_RESERVED_NAME_CHARS_LENGTH == sizeIn || bufIn[HGFS_RESERVED_NAME_CHARS_LENGTH] == '.') { if (!processEscape(bufIn, HGFS_RESERVED_NAME_CHARS_LENGTH, HGFS_ESCAPE_RESERVED_NAME, context)) { return FALSE; } *offset = HGFS_RESERVED_NAME_CHARS_LENGTH; } } /* Look reserved names with numbers: COM1-9 and LPT1-9. */ if (sizeIn >= HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH && HgfsIsReservedPrefixWithNumber(bufIn)) { if (HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH == sizeIn || bufIn[HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH] == '.') { if (!processEscape(bufIn, HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH, HGFS_ESCAPE_RESERVED_NAME, context)) { return FALSE; } *offset = HGFS_RESERVED_NAME_WITH_NUMBER_CHARS_LENGTH; } } return TRUE; } /* *----------------------------------------------------------------------------- * * HgfsProcessLastCharacter -- * * Verifies if the trailing character in the name is a valid last character. * In Windows it is illegal to have a file name that ends with dot ('.') or * space (' '). The only exception is "." and ".." directory names. * If the last character is invalid the function invokes a callback to process it. * * Results: * TRUE if no processing is required of if processing succeeded, * FALSE if processing failed. * * Side effects: * None. 
 *
 *-----------------------------------------------------------------------------
 */

static Bool
HgfsProcessLastCharacter(char const *bufIn,              // IN: Unescaped input buffer
                         uint32 sizeIn,                  // IN: Length of the input
                         HgfsEnumCallback processEscape, // IN: Callback that is invoked
                                                         //     when escaping is needed
                         void *context)                  // IN/OUT: Callback context
{
   /* If the filename is '.' or '..' we shouldn't escape it. */
   if ((sizeIn == 1 && bufIn[0] == '.') ||
       (sizeIn == 2 && bufIn[0] == '.' && bufIn[1] == '.')) {
      return TRUE;
   }

   /* Invoke the callback if the last character is illegal. */
   if (strchr(HGFS_ILLEGAL_LAST_CHARS, bufIn[sizeIn - 1]) != NULL) {
      if (!processEscape(bufIn, sizeIn, HGFS_ESCAPE_ILLEGAL_LAST_CHARACTER, context)) {
         return FALSE;
      }
   }
   return TRUE;
}

#endif // WIN32


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsIsEscapeSequence --
 *
 *    Verifies if input buffer has an escape sequence at the position
 *    defined by offset.
 *
 * Results:
 *    TRUE if there is an escape sequence at the position defined by offset.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static Bool
HgfsIsEscapeSequence(char const *bufIn,   // IN: input name
                     uint32 offset,       // IN: offset of the escape character
                     uint32 length)       // IN: length of the name in characters
{
   /* Note: && short-circuit guards every bufIn[offset - N] read below. */
   if (bufIn[offset] == HGFS_ESCAPE_CHAR && offset > 0) {
      char *substitute;

      if (bufIn[offset - 1] == HGFS_ESCAPE_SUBSTITUE_CHAR && offset > 1) {
         /*
          * Possibly a valid sequence, check it must be preceded with a substitute
          * character or another escape-escape character. Otherwise, HGFS did
          * not generate this sequence and should leave it alone.
          */
         if (bufIn[offset - 2] == HGFS_ESCAPE_SUBSTITUE_CHAR) {
            return TRUE;
         }

         substitute = strchr(HGFS_SUBSTITUTE_CHARS, bufIn[offset - 2]);
         if (substitute != NULL) {
            return TRUE;
         }
      }

      /* Escape character directly after a substitute character. */
      substitute = strchr(HGFS_SUBSTITUTE_CHARS, bufIn[offset - 1]);
      if (substitute != NULL) {
         return TRUE;
      }

      /* Windows-only cases: escaped reserved name / illegal last character. */
      return IS_SPECIAL_CASE_ESCAPE(bufIn,offset,length);
   }
   return FALSE;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsEscapeEnumerate --
 *
 *    The function scans the input buffer and calls processEscape callback for every
 *    place in the input buffer which require escaping.
 *
 *    Callback does the required processing. There are two different callbacks -
 *    one counts extra symbols that are needed for escaping and another produces
 *    escaped output name based on input name.
 *
 *    1. The first function calculates number of extra characters. It just increments
 *    a counter which is passed to it in context variable every time it is called
 *    for the reason different from "complete processing" assuming that
 *    exactly one extra character is required to escape any invalid input.
 *
 *    2. The second function produces output name by copying everything from input
 *    name into the output name up to the place which require escaping and
 *    then inserts appropriate escape sequence into the output. It keeps track of its
 *    progress and keeps pointer to the output buffer in the context variable.
 *    HgfsEscapeEnumerate calls callback function one more time at the end of the
 *    input buffer to let callback finish processing of the input (for example copy
 *    the rest of the name after the last escape sequence from input buffer to
 *    output buffer).
 *
 * Results:
 *    TRUE if the input has been processed successfully by the callback, false otherwise.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static Bool
HgfsEscapeEnumerate(char const *bufIn,              // IN: Buffer with unescaped input
                    uint32 sizeIn,                  // IN: Number of input *characters*
                    HgfsEnumCallback processEscape, // IN: Callback that is invoked every
                                                    //     time escaping is required
                    void *context)                  // IN/OUT: Context for processEscape
{
   /* First look for invalid characters in the input name. */
   uint32 i, offset = 0;

   if (sizeIn == 0) {
      return TRUE;
   }

   ASSERT(processEscape);

   /* On Windows this may advance 'offset' past an escaped reserved prefix. */
   PROCESS_RESERVED_NAME(bufIn, sizeIn, processEscape, &offset, context);
   for (i = offset; i < sizeIn; i++) {
      if (strchr(HGFS_ILLEGAL_CHARS, bufIn[i]) != NULL) {
         if (!processEscape(bufIn, i, HGFS_ESCAPE_ILLEGAL_CHARACTER, context)) {
            return FALSE;
         }
      } else if (HgfsIsEscapeSequence(bufIn, i, sizeIn)) {
         /* Escape the escape character itself so unescaping stays unambiguous. */
         if (!processEscape(bufIn, i, HGFS_ESCAPE_ESCAPE_SEQUENCE, context)) {
            return FALSE;
         }
      }
   }

   PROCESS_LAST_CHARACTER(bufIn, sizeIn, processEscape, context);

   /* Final call lets the callback flush the tail of the input. */
   if (!processEscape(bufIn, sizeIn, HGFS_ESCAPE_COMPLETE, context)) {
      return FALSE;
   }
   return TRUE;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsEscape_Do --
 *
 *    Escape any characters that are not legal in a windows filename.
 *    Escape reserved file names that can't be used in Windows.
 *    We also of course have to escape the escape character, which is "%",
 *    when it is part of a character sequence that would require unescaping
 *
 *    sizeBufOut must account for the NUL terminator.
 *
 * Results:
 *    On success, the size (excluding the NUL terminator) of the
 *    escaped, NUL terminated buffer.
 *    On failure (bufOut not big enough to hold result), negative value.
* * Side effects: * None * *----------------------------------------------------------------------------- */ int HgfsEscape_Do(char const *bufIn, // IN: Buffer with unescaped input uint32 sizeIn, // IN: Size of input buffer uint32 sizeBufOut, // IN: Size of output buffer char *bufOut) // OUT: Buffer for escaped output { const char *currentComponent = bufIn; uint32 sizeLeft = sizeBufOut; char *outPointer = bufOut; const char *end = bufIn + sizeIn; const char *next; ASSERT(sizeIn > 0); if (bufIn[sizeIn - 1] == '\0') { /* * In some cases a NUL terminated string is passed to HgfsEscape_Do * so it make sense to support such input even if CPName_GetComponent * does not. Detect this case and make the input compliant with * CPName_GetComponent by removing terminating NUL. */ end--; sizeIn--; } /* * Absolute symbolic link name starts with the '\0'. HgfsEscapeDo needs to work * with such names. Leading NULL symbols should be skipped here since * CPName_GetComponent does not support such names. */ while (*currentComponent == '\0' && currentComponent - bufIn < sizeIn) { currentComponent++; sizeLeft--; *outPointer++ = '\0'; } while (currentComponent - bufIn < sizeIn) { int escapedLength; int componentSize = CPName_GetComponent(currentComponent, end, &next); if (componentSize < 0) { return componentSize; } escapedLength = HgfsEscapeDoComponent(currentComponent, componentSize, sizeLeft, outPointer); if (escapedLength < 0) { return escapedLength; } currentComponent = next; sizeLeft -= escapedLength + 1; outPointer += escapedLength + 1; } return (int) (outPointer - bufOut) - 1; // Do not count the last NUL terminator } /* *----------------------------------------------------------------------------- * * HgfsEscape_GetSize -- * * Calculates required size in bytes for the buffer that is needed to hold escaped * cross platform path name. Returns 0 if no escaping is required. * * Results: * On success, the size (excluding the NUL terminator) of the * escaped, NUL terminated buffer. 
* Returns 0 if the name is a valid Windows file name. * Returns -1 if the name is not a valid file name. * * Side effects: * None * *----------------------------------------------------------------------------- */ int HgfsEscape_GetSize(char const *bufIn, // IN: Buffer with unescaped input uint32 sizeIn) // IN: Size of the input buffer { uint32 result = 0; const char *currentComponent = bufIn; const char *end = bufIn + sizeIn; const char *next; if (sizeIn == 0) { // No need to escape an empty name. return 0; } if (bufIn[sizeIn - 1] == '\0') { /* * In some cases, a NUL-terminated string is passed to HgfsEscape_GetSize, * so it makes sense to support such input even if CPName_GetComponent * does not. Detect this case and make the input compliant with * CPName_GetComponent by removing the terminating NUL. */ end--; sizeIn--; } /* Skip leading NULs to keep CPName_GetComponent happy. */ while (*currentComponent == '\0' && currentComponent - bufIn < sizeIn) { currentComponent++; } while (currentComponent - bufIn < sizeIn) { int componentSize = CPName_GetComponent(currentComponent, end, &next); if (componentSize < 0) { Log("%s: failed to calculate escaped name size - name is invalid\n", __FUNCTION__); return -1; } result += HgfsEscapeGetComponentSize(currentComponent, componentSize); currentComponent = next; } return (result == 0) ? 0 : result + sizeIn; } /* *----------------------------------------------------------------------------- * * HgfsEscape_Undo -- * * Unescape a buffer that was escaped using HgfsEscapeBuffer. * * The unescaping is done in place in the input buffer, and * can not fail. * * Results: * The size (excluding the NUL terminator) of the unescaped, NUL * terminated buffer. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ uint32 HgfsEscape_Undo(char *bufIn, // IN: Characters to be unescaped uint32 sizeIn) // IN: Number of characters in bufIn { uint32 componentSize; uint32 unprocessedSize = sizeIn + 1; uint32 result = 0; char *currentComponent = bufIn; ASSERT(bufIn != NULL); while (currentComponent != NULL) { HgfsEscapeUndoComponent(currentComponent, &unprocessedSize); componentSize = strlen(currentComponent) + 1; // Unescaped size result += componentSize; if (unprocessedSize > 1) { currentComponent = currentComponent + componentSize; componentSize = strlen(currentComponent) + 1; // Size of the next component } else { currentComponent = NULL; } } return result - 1; } /* *----------------------------------------------------------------------------- * * HgfsEscapeUndoComponent -- * * Unescape a buffer that was escaped using HgfsEscapeBuffer. * * The unescaping is done in place in the input buffer, and * can not fail. 
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

void
HgfsEscapeUndoComponent(char *bufIn,               // IN: Characters to be unescaped
                        uint32 *unprocessedLength) // IN: Unprocessed characters
                                                   //     in the whole name
{
   size_t offset;
   size_t sizeIn;
   char* curOutBuffer;
   char* escapePointer;

   ASSERT(bufIn != NULL);

   curOutBuffer = bufIn;
   sizeIn = strlen(curOutBuffer);
   escapePointer = strchr(curOutBuffer, HGFS_ESCAPE_CHAR);

   /* Walk every escape character in this component. */
   while (escapePointer != NULL) {
      offset = escapePointer - bufIn;
      if (HgfsIsEscapeSequence(bufIn, offset, sizeIn)) {
         /* Restore the original character that the escape stood in for. */
         char* substitute = strchr(HGFS_SUBSTITUTE_CHARS, bufIn[offset - 1]);
         if (substitute != NULL) {
            bufIn[offset - 1] = HGFS_ILLEGAL_CHARS[substitute - HGFS_SUBSTITUTE_CHARS];
         } else if (bufIn[offset - 1] == HGFS_ESCAPE_SUBSTITUE_CHAR) {
            bufIn[offset - 1] = HGFS_ESCAPE_CHAR;
         }
         /* Remove the escape character itself by shifting the tail left. */
         memmove(escapePointer, escapePointer + 1,
                 (*unprocessedLength) - offset - 1);
         (*unprocessedLength)--;
         sizeIn--;
         if (sizeIn > 0) {
            /*
             * The escape char at this position was just removed, so rescanning
             * from escapePointer (not +1) is safe and required.
             */
            escapePointer = strchr(escapePointer, HGFS_ESCAPE_CHAR);
         } else {
            escapePointer = NULL;
         }
      } else {
         /* Not an HGFS-generated sequence; leave it and look further. */
         escapePointer = strchr(escapePointer + 1, HGFS_ESCAPE_CHAR);
      }
   }
   ASSERT((*unprocessedLength) > sizeIn);
   (*unprocessedLength) -= sizeIn + 1; // Consume this component plus its NUL.
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsEscapeDoComponent --
 *
 *    Escape any characters that are not legal in a windows filename.
 *    Escape reserved file names that can't be used in Windows.
 *    We also of course have to escape the escape character, which is "%",
 *    when it is part of a character sequence that would require unescaping
 *
 *    sizeBufOut must account for the NUL terminator.
 *
 * Results:
 *    On success, the size (excluding the NUL terminator) of the
 *    escaped, NUL terminated buffer.
 *    On failure (bufOut not big enough to hold result), negative value.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsEscapeDoComponent(char const *bufIn,   // IN: Buffer with unescaped input
                      uint32 sizeIn,       // IN: Size of input buffer
                      uint32 sizeBufOut,   // IN: Size of output buffer
                      char *bufOut)        // OUT: Buffer for escaped output
{
   HgfsEscapeContext conversionContext;
   conversionContext.processedOffset = 0;
   conversionContext.outputBufferLength = sizeBufOut / sizeof *bufOut;
   conversionContext.outputOffset = 0;
   conversionContext.outputBuffer = bufOut;

   /* HgfsAddEscapeCharacter copies input to bufOut, inserting escapes. */
   if (!HgfsEscapeEnumerate(bufIn, sizeIn, HgfsAddEscapeCharacter, &conversionContext)) {
      return -1;
   }
   return conversionContext.outputOffset * sizeof *bufOut;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsEscapeGetComponentSize --
 *
 *    Calculates number of additional characters that are needed to escape
 *    name for one NUL terminated component of the path.
 *
 * Results:
 *    Number of additional escape characters needed to escape the name.
 *    Returns 0 if no escaping is required.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsEscapeGetComponentSize(char const *bufIn, // IN: Buffer with unescaped input
                           uint32 sizeIn)     // IN: Size of the input buffer
{
   int result = 0;
   /* HgfsCountEscapeChars increments 'result' once per needed escape. */
   HgfsEscapeEnumerate(bufIn, sizeIn, HgfsCountEscapeChars, &result);
   return result;
}
vmhgfs-only/hgfs.h 0000444 0000000 0000000 00000020545 13432725350 013135 0 ustar root root /*********************************************************
 * Copyright (C) 1998-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 *
 *********************************************************/

/*
 * hgfs.h --
 *
 *    Header file for public common data types used in the VMware
 *    Host/Guest File System (hgfs).
 *
 *    This file is included by hgfsProto.h, which defines message formats
 *    used in the hgfs protocol, and by hgfsDev.h, which defines the
 *    interface between the kernel and the hgfs pserver. [bac]
 */

#ifndef _HGFS_H_
# define _HGFS_H_

#define INCLUDE_ALLOW_USERLEVEL
#define INCLUDE_ALLOW_MODULE
#define INCLUDE_ALLOW_DISTRIBUTE
#include "includeCheck.h"

#include "vm_assert.h"

/*
 * Maximum number of pages to transfer to/from the HGFS server for V3 protocol
 * operations that support large requests/replies, e.g. reads and writes.
 */
#define HGFS_LARGE_IO_MAX_PAGES  15

/*
 * Maximum allowed packet size in bytes. All hgfs code should be made
 * safe with respect to this limit.
 */
#define HGFS_PACKET_MAX 6144

/*
 * The HGFS_LARGE_PACKET_MAX size is used to allow guests to make
 * read / write requests of sizes larger than HGFS_PACKET_MAX. The larger size
 * can only be used with server operations that are specified to be large packet
 * capable in hgfsProto.h.
 */
#define HGFS_LARGE_PACKET_MAX ((4096 * HGFS_LARGE_IO_MAX_PAGES) + 2048)

/* Maximum number of bytes to read or write to a hgfs server in a single packet. */
#define HGFS_IO_MAX 4096

/* Maximum number of bytes to read or write to a V3 server in a single hgfs packet. */
#define HGFS_LARGE_IO_MAX (HGFS_LARGE_IO_MAX_PAGES * 4096)

/*
 * File type
 *
 * File types, used in HgfsAttr. We support regular files,
 * directories, and symlinks.
 *
 * Changing the order of this enum will break the protocol; new types
 * should be added at the end.
 *
 *
 * This definition is used in some places that don't include
 * hgfsProto.h, which is why it is here instead of there.
 */

typedef enum {
   HGFS_FILE_TYPE_REGULAR,
   HGFS_FILE_TYPE_DIRECTORY,
   HGFS_FILE_TYPE_SYMLINK,
} HgfsFileType;

/*
 * Open mode
 *
 * These are equivalent to the O_RDONLY, O_WRONLY, O_RDWR open flags
 * in Unix; they specify which type of access is being requested.  These three
 * modes are mutually exclusive and one is required; all other flags are
 * modifiers to the mode and must come afterwards as a bitmask. Beware that
 * HGFS_OPEN_MODE_READ_ONLY contains the value 0 so simply masking another
 * variable with it to detect its presence is not safe. The _ACCMODES entry in
 * the enum serves as a bitmask for the others.
 *
 * Changing the order of this enum will break stuff.
 *
 * This definition is used in some places that don't include
 * hgfsProto.h, which is why it is here instead of there.
 */

typedef enum {
   HGFS_OPEN_MODE_READ_ONLY,
   HGFS_OPEN_MODE_WRITE_ONLY,
   HGFS_OPEN_MODE_READ_WRITE,
   HGFS_OPEN_MODE_ACCMODES, /* You cannot add anything else here. Really. */
} HgfsOpenMode;

/*
 * Open flags.
 *
 * Each should be shifted left by HGFS_OPEN_MODE_READ_WRITE plus whatever flag
 * number they are, starting with zero.
 *
 * The sequential flag indicates that reads and writes on this handle should
 * not seek on each operation; instead, the system's file pointer will be used
 * so each operation is performed where the last one finished. This flag is
 * necessary when reading from or writing to non-seekable files (such as procfs
 * nodes on Linux) but can also lead to inconsistent results if a client shares
 * a handle amongst several of its callers. This flag should only be used when
 * the client knows the file is non-seekable and the burden of ensuring file
 * handles aren't shared falls upon the hgfs client, not the server.
 */
#define HGFS_OPEN_SEQUENTIAL (1 << HGFS_OPEN_MODE_READ_WRITE)

/* Masking helpers. */
#define HGFS_OPEN_MODE_ACCMODE(mode) (mode & HGFS_OPEN_MODE_ACCMODES)
#define HGFS_OPEN_MODE_FLAGS(mode)   (mode & ~HGFS_OPEN_MODE_ACCMODES)
#define HGFS_OPEN_MODE_IS_VALID_MODE(mode) \
   (HGFS_OPEN_MODE_ACCMODE(mode) == HGFS_OPEN_MODE_READ_ONLY || \
    HGFS_OPEN_MODE_ACCMODE(mode) == HGFS_OPEN_MODE_WRITE_ONLY || \
    HGFS_OPEN_MODE_ACCMODE(mode) == HGFS_OPEN_MODE_READ_WRITE)

/*
 * Return status for replies from the server.
 *
 * Changing the order of this enum will break the protocol; new status
 * types should be added at the end.
 *
 * This definition is used in some places that don't include
 * hgfsProto.h, which is why it is here instead of there.
 *
 * XXX: So we have a problem here. At some point, HGFS_STATUS_INVALID_NAME was
 * added to the list of errors. Later, HGFS_STATUS_GENERIC_ERROR was added, but
 * it was added /before/ HGFS_STATUS_INVALID_NAME. Nobody noticed because the
 * error codes travelled from hgfsProto.h to hgfs.h in that same change. Worse,
 * we GA'ed a product (Server 1.0) this way.
 *
 * XXX: I've reversed the order because otherwise new HGFS clients working
 * against WS55-era HGFS servers will think they got HGFS_STATUS_GENERIC_ERROR
 * when the server sent them HGFS_STATUS_INVALID_NAME. This was a problem
 * because the Linux client converts HGFS_STATUS_GENERIC_ERROR to -EIO, which causes
 * HgfsLookup to fail unexpectedly (normally HGFS_STATUS_INVALID_NAME is
 * converted to -ENOENT, an expected result in HgfsLookup).
 */
typedef enum {
   HGFS_STATUS_SUCCESS,
   HGFS_STATUS_NO_SUCH_FILE_OR_DIR,
   HGFS_STATUS_INVALID_HANDLE,
   HGFS_STATUS_OPERATION_NOT_PERMITTED,
   HGFS_STATUS_FILE_EXISTS,
   HGFS_STATUS_NOT_DIRECTORY,
   HGFS_STATUS_DIR_NOT_EMPTY,
   HGFS_STATUS_PROTOCOL_ERROR,
   HGFS_STATUS_ACCESS_DENIED,
   HGFS_STATUS_INVALID_NAME,
   HGFS_STATUS_GENERIC_ERROR,
   HGFS_STATUS_SHARING_VIOLATION,
   HGFS_STATUS_NO_SPACE,
   HGFS_STATUS_OPERATION_NOT_SUPPORTED,
   HGFS_STATUS_NAME_TOO_LONG,
   HGFS_STATUS_INVALID_PARAMETER,
   HGFS_STATUS_NOT_SAME_DEVICE,
   /*
    * Following error codes are for V4 and above protocol only.
    * Server must never return these codes for legacy clients.
    */
   HGFS_STATUS_STALE_SESSION,
   HGFS_STATUS_TOO_MANY_SESSIONS,
   HGFS_STATUS_TRANSPORT_ERROR,
} HgfsStatus;

/*
 * HGFS RPC commands
 *
 * HGFS servers can run in a variety of places across several different
 * transport layers. These definitions constitute all known RPC commands.
 *
 * For each definition, there is both the server string (the command itself)
 * as well as a client "prefix", which is the command followed by a space.
 * This is provided for convenience, since clients will need to copy both
 * the command and the space into some buffer that is then sent over the
 * backdoor.
 *
 * In Host --> Guest RPC traffic, the host endpoint is TCLO and the guest
 * endpoint is RpcIn. TCLO is a particularly confusing name choice which dates
 * back to when the host was to send raw TCL code to the guest (TCL Out ==
 * TCLO).
 *
 * In Guest --> Host RPC traffic, the guest endpoint is RpcOut and the host
 * endpoint is RPCI.
 */

/*
 * When an RPCI listener registers for this command, HGFS requests are expected
 * to be synchronously sent from the guest and replies are expected to be
 * synchronously returned.
 *
 * When an RpcIn listener registers for this command, requests are expected to
 * be asynchronously sent from the host and synchronously returned from the
 * guest.
 *
 * In short, an endpoint sending this command is sending a request whose reply
 * should be returned synchronously.
 */
#define HGFS_SYNC_REQREP_CMD "f"
#define HGFS_SYNC_REQREP_CLIENT_CMD HGFS_SYNC_REQREP_CMD " "
#define HGFS_SYNC_REQREP_CLIENT_CMD_LEN (sizeof HGFS_SYNC_REQREP_CLIENT_CMD - 1)

/*
 * This is just for the sake of macro naming. Since we are guaranteed
 * equal command lengths, defining command length via a generalized macro name
 * will prevent confusion.
 */
#define HGFS_CLIENT_CMD_LEN HGFS_SYNC_REQREP_CLIENT_CMD_LEN

#endif // _HGFS_H_
vmhgfs-only/dir.c 0000444 0000000 0000000 00000126712 13432725306 012763 0 ustar root root /*********************************************************
 * Copyright (C) 2006-2016 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St, Fifth Floor, Boston, MA  02110-1301 USA
 *
 *********************************************************/

/*
 * dir.c --
 *
 *    Directory operations for the filesystem portion of the vmhgfs driver.
 */

/* Must come before any kernel header file.
 */
#include "driver-config.h"

#include <linux/errno.h>
#include <linux/module.h>

#include "compat_fs.h"
#include "compat_kernel.h"
#include "compat_slab.h"
#include "compat_mutex.h"

#include "cpName.h"
#include "hgfsEscape.h"
#include "hgfsProto.h"
#include "hgfsUtil.h"
#include "module.h"
#include "request.h"
#include "fsutil.h"
#include "vm_assert.h"
#include "vm_basic_types.h"

/* Private functions. */
static int HgfsPrivateDirReOpen(struct file *file);
static int HgfsPrivateDirOpen(struct file *file, HgfsHandle *handle);
static int HgfsPrivateDirRelease(struct file *file, HgfsHandle handle);
static int HgfsUnpackSearchReadReply(HgfsReq *req,
                                     HgfsAttrInfo *attr,
                                     char **entryName);
static int HgfsGetNextDirEntry(HgfsSuperInfo *si,
                               HgfsHandle searchHandle,
                               uint32 offset,
                               HgfsAttrInfo *attr,
                               char **entryName,
                               Bool *done);
static int HgfsPackDirOpenRequest(struct file *file,
                                  HgfsOp opUsed,
                                  HgfsReq *req);
static Bool HgfsReaddirFillEntry(filldir_t filldirCb,
                                 void *context,
                                 char *entryName,
                                 uint32 entryNameLength,
                                 loff_t entryPos,
                                 ino_t entryIno,
                                 uint32 entryType);

/* HGFS file operations for directories. */
static int HgfsDirOpen(struct inode *inode, struct file *file);
/* Kernels >= 3.11 replaced the readdir file operation with iterate. */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
static int HgfsReaddir(struct file *file, struct dir_context *ctx);
#else
static int HgfsReaddir(struct file *file, void *dirent, filldir_t filldir);
#endif
static int HgfsDirRelease(struct inode *inode, struct file *file);
static loff_t HgfsDirLlseek(struct file *file, loff_t offset, int origin);

/* HGFS file operations structure for directories. */
struct file_operations HgfsDirFileOperations = {
   .llseek      = HgfsDirLlseek,
   .owner       = THIS_MODULE,
   .open        = HgfsDirOpen,
   .read        = generic_read_dir,
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)
   .iterate     = HgfsReaddir,
#else
   .readdir     = HgfsReaddir,
#endif
   .release     = HgfsDirRelease,
};

/*
 * Private function implementations.
 */

/*
 *----------------------------------------------------------------------
 *
 * HgfsUnpackSearchReadReply --
 *
 *    This function abstracts the differences between a SearchReadV1 and
 *    a SearchReadV2. The caller provides the packet containing the reply
 *    and we populate the AttrInfo with version-independent information.
 *
 *    Note that attr->requestType has already been populated so that we
 *    know whether to expect a V1 or V2 reply.
 *
 * Results:
 *    0 on success, anything else on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsUnpackSearchReadReply(HgfsReq *req,        // IN: Reply packet
                          HgfsAttrInfo *attr,  // IN/OUT: Attributes
                          char **entryName)    // OUT: file name
{
   char *fileName;
   uint32 fileNameLength;
   uint32 replySize;
   int result;

   ASSERT(req);
   ASSERT(attr);

   result = HgfsUnpackCommonAttr(req, attr);
   if (result != 0) {
      return result;
   }

   /* Locate the file name within the version-specific reply layout. */
   switch(attr->requestType) {
   case HGFS_OP_SEARCH_READ_V3: {
      HgfsReplySearchReadV3 *replyV3;
      HgfsDirEntry *dirent;

      /* Currently V3 returns only 1 entry. */
      replyV3 = (HgfsReplySearchReadV3 *)(HGFS_REP_PAYLOAD_V3(req));
      /*
       * NOTE(review): this overwrites the server-reported count rather than
       * reading it — presumably intentional while V3 replies carry exactly
       * one entry; confirm against the server implementation.
       */
      replyV3->count = 1;
      replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3) + sizeof *dirent;
      dirent = (HgfsDirEntry *)replyV3->payload;
      fileName = dirent->fileName.name;
      fileNameLength = dirent->fileName.length;
      break;
   }
   case HGFS_OP_SEARCH_READ_V2: {
      HgfsReplySearchReadV2 *replyV2;

      replyV2 = (HgfsReplySearchReadV2 *)(HGFS_REQ_PAYLOAD(req));
      replySize = sizeof *replyV2;
      fileName = replyV2->fileName.name;
      fileNameLength = replyV2->fileName.length;
      break;
   }
   case HGFS_OP_SEARCH_READ: {
      HgfsReplySearchRead *replyV1;

      replyV1 = (HgfsReplySearchRead *)(HGFS_REQ_PAYLOAD(req));
      replySize = sizeof *replyV1;
      fileName = replyV1->fileName.name;
      fileNameLength = replyV1->fileName.length;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   /*
    * Make sure name length is legal.
    */
   if (fileNameLength > NAME_MAX ||
       fileNameLength > req->bufferSize - replySize) {
      return -ENAMETOOLONG;
   }

   /*
    * If the size of the name is valid (meaning the end of the directory has
    * not yet been reached), copy the name to the AttrInfo struct.
    *
    * XXX: This operation happens often and the length of the filename is
    * bounded by NAME_MAX. Perhaps I should just put a statically-sized
    * array in HgfsAttrInfo and use a slab allocator to allocate the struct.
    */
   if (fileNameLength > 0) {
      /* Sanity check on name length. */
      if (fileNameLength != strlen(fileName)) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: name "
                 "length mismatch %u/%Zu, name \"%s\"\n",
                 fileNameLength, strlen(fileName), fileName));
         return -EPROTO;
      }
      /* Caller frees this buffer; +1 copies the NUL verified above. */
      *entryName = kmalloc(fileNameLength + 1, GFP_KERNEL);
      if (*entryName == NULL) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackSearchReadReply: out of "
                 "memory allocating filename, ignoring\n"));
         return -ENOMEM;
      }
      memcpy(*entryName, fileName, fileNameLength + 1);
   } else {
      /* Zero-length name marks the end of the directory. */
      *entryName = NULL;
   }

   return 0;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsGetNextDirEntry --
 *
 *    Get the directory entry with the given offset from the server.
 *
 *    fileName gets allocated and must be freed by the caller.
 *
 * Results:
 *    Returns zero on success, negative error on failure. If the
 *    dentry's name is too long, -ENAMETOOLONG is returned.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsGetNextDirEntry(HgfsSuperInfo *si,       // IN: Superinfo for this SB
                    HgfsHandle searchHandle, // IN: Handle of dir
                    uint32 offset,           // IN: Offset of next dentry to get
                    HgfsAttrInfo *attr,      // OUT: File attributes of dentry
                    char **entryName,        // OUT: File name
                    Bool *done)              // OUT: Set true when there are
                                             //      no more dentries
{
   HgfsReq *req;
   HgfsOp opUsed;
   HgfsStatus replyStatus;
   int result = 0;

   ASSERT(si);
   ASSERT(attr);
   ASSERT(done);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: out of memory "
              "while getting new request\n"));
      return -ENOMEM;
   }

 retry:
   /* Pack the request for whichever protocol version is currently active. */
   opUsed = hgfsVersionSearchRead;
   if (opUsed == HGFS_OP_SEARCH_READ_V3) {
      HgfsRequest *header;
      HgfsRequestSearchReadV3 *request;

      header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      header->op = attr->requestType = opUsed;
      header->id = req->id;

      request = (HgfsRequestSearchReadV3 *)(HGFS_REQ_PAYLOAD_V3(req));
      request->search = searchHandle;
      request->offset = offset;
      request->flags = 0;
      request->reserved = 0;
      req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestSearchRead *request;

      request = (HgfsRequestSearchRead *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = attr->requestType = opUsed;
      request->header.id = req->id;
      request->search = searchHandle;
      request->offset = offset;
      req->payloadSize = sizeof *request;
   }

   /* Send the request and process the reply. */
   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch(result) {
      case 0:
         result = HgfsUnpackSearchReadReply(req, attr, entryName);
         if (result == 0 && *entryName == NULL) {
            /* We're at the end of the directory. */
            LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: end of "
                    "dir\n"));
            *done = TRUE;
         }
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (attr->requestType == HGFS_OP_SEARCH_READ_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 3 "
                    "not supported. Falling back to version 2.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ_V2;
            goto retry;
         } else if (attr->requestType == HGFS_OP_SEARCH_READ_V2) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: Version 2 "
                    "not supported. Falling back to version 1.\n"));
            hgfsVersionSearchRead = HGFS_OP_SEARCH_READ;
            goto retry;
         }

         /* Fallthrough. */
      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetNextDirEntry: unknown error: "
              "%d\n", result));
   }

   HgfsFreeRequest(req);
   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPackDirOpenRequest --
 *
 *    Setup the directory open request, depending on the op version.
 *
 * Results:
 *    Returns zero on success, or negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsPackDirOpenRequest(struct file *file, // IN: File pointer for this open
                       HgfsOp opUsed,     // IN: Op to be used
                       HgfsReq *req)      // IN/OUT: Packet to write into
{
   char *name;
   uint32 *nameLength;
   size_t requestSize;
   int result;

   ASSERT(file);
   ASSERT(req);

   switch (opUsed) {
   case HGFS_OP_SEARCH_OPEN_V3: {
      HgfsRequest *requestHeader;
      HgfsRequestSearchOpenV3 *requestV3;

      requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      requestHeader->op = opUsed;
      requestHeader->id = req->id;

      requestV3 = (HgfsRequestSearchOpenV3 *)HGFS_REQ_PAYLOAD_V3(req);

      /* We'll use these later. */
      name = requestV3->dirName.name;
      nameLength = &requestV3->dirName.length;
      requestV3->dirName.flags = 0;
      requestV3->dirName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->dirName.fid = HGFS_INVALID_HANDLE;
      requestV3->reserved = 0;
      requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3);
      break;
   }
   case HGFS_OP_SEARCH_OPEN: {
      HgfsRequestSearchOpen *request;

      request = (HgfsRequestSearchOpen *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = opUsed;
      request->header.id = req->id;

      /* We'll use these later. */
      name = request->dirName.name;
      nameLength = &request->dirName.length;
      requestSize = sizeof *request;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   /* Build full name to send to server. */
   if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1),
                     file->f_dentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: build path failed\n"));
      return -EINVAL;
   }
   LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: opening \"%s\"\n",
           name));

   /* Convert to CP name. */
   result = CPName_ConvertTo(name, req->bufferSize - (requestSize - 1), name);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackDirOpenRequest: CP conversion failed\n"));
      return -EINVAL;
   }

   *nameLength = (uint32) result;
   req->payloadSize = requestSize + result;
   return 0;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPrivateDirOpen --
 *
 *    Called by HgfsDirOpen() and HgfsReaddir() routines.
 *
 * Results:
 *    Returns zero on success, error on failure.
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPrivateDirOpen(struct file *file, // IN: File pointer for this open HgfsHandle *handle) // IN: Hgfs handle { HgfsReq *req; int result; HgfsOp opUsed; HgfsStatus replyStatus; HgfsHandle *replySearch; ASSERT(file); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionSearchOpen; if (opUsed == HGFS_OP_SEARCH_OPEN_V3) { replySearch = &((HgfsReplySearchOpenV3 *)HGFS_REP_PAYLOAD_V3(req))->search; } else { replySearch = &((HgfsReplySearchOpen *)HGFS_REQ_PAYLOAD(req))->search; } result = HgfsPackDirOpenRequest(file, opUsed, req); if (result != 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen error packing request\n")); goto out; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply and check return status. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: /* Save the handle value */ *handle = *replySearch; LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Handle returned = %u\n", *replySearch)); break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_SEARCH_OPEN_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionSearchOpen = HGFS_OP_SEARCH_OPEN; goto retry; } LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); break; default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirOpen: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; } /* *---------------------------------------------------------------------- * * HgfsPrivateDirRelease -- * * Called by HgfsDirRelease() and HgfsReaddir() routines. * * Results: * Returns zero on success, or an error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPrivateDirRelease(struct file *file, // IN: File for the dir getting released HgfsHandle handle) // IN: Hgfs handle { HgfsReq *req; HgfsStatus replyStatus; HgfsOp opUsed; int result = 0; ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_sb); LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: close fh %u\n", handle)); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionSearchClose; if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) { HgfsRequestSearchCloseV3 *request; HgfsRequest *header; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestSearchCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->search = handle; request->reserved = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestSearchClose *request; request = 
(HgfsRequestSearchClose *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->search = handle; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: release handle %u\n", handle)); break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_SEARCH_CLOSE_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionSearchClose = HGFS_OP_SEARCH_CLOSE; goto retry; } break; default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: failed handle %u\n", handle)); break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPrivateDirRelease: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; } /* *---------------------------------------------------------------------- * * HgfsPrivateDirReOpen -- * * Reopens the file. Called by HgfsReaddir() routine. * * Results: * Returns zero if on success, error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPrivateDirReOpen(struct file *file) // IN: File pointer for this open { int result = 0; HgfsHandle *handle = &FILE_GET_FI_P(file)->handle; LOG(4, (KERN_DEBUG "HgfsPrivateDirReOpen: Directory handle in invalid;" "Reopening ...\n")); result = HgfsPrivateDirRelease(file, *handle); if (result) { return result; } result = HgfsPrivateDirOpen(file, handle); if (result) { return result; } FILE_GET_FI_P(file)->isStale = FALSE; return result; } /* * HGFS file operations for directories. */ /* *---------------------------------------------------------------------- * * HgfsDirLlseek -- * * Called whenever a process does rewinddir() or telldir()/seekdir(). * * Results: * Returns zero if on success, error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static loff_t HgfsDirLlseek(struct file *file, loff_t offset, int origin) { struct dentry *dentry = file->f_dentry; struct inode *inode = dentry->d_inode; compat_mutex_t *mtx; LOG(4, (KERN_DEBUG "Got llseek call with origin = %d, offset = %u," "pos = %u\n", origin, (uint32)offset, (uint32)file->f_pos)); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) mtx = &inode->i_sem; #else mtx = &inode->i_mutex; #endif compat_mutex_lock(mtx); switch(origin) { /* SEEK_CUR */ case 1: offset += file->f_pos; break; /* SEEK_SET */ case 0: break; /* SEEK_END */ case 2: default: offset = -EINVAL; break; } if (offset < 0) { offset = -EINVAL; goto out; } if (offset != file->f_pos) { file->f_pos = offset; } /* * rewinddir() semantics says that It causes the directory stream * to refer to the current state of the corresponding directory, * as a call to opendir would have done. So when rewinddir() happens, * we mark current directory as stale, so that subsequent readdir() * call will reopen() the directory. 
* * XXX telldir()/seekdir() semantics does not say that we need to refer * to the current state of a directory. However, an application that does * following: telldir() -> rmdir(current_entry) -> seekdir() and checking * whether entry was deleted or not, will break. I have no evidence of an * application relying on above behavior, so let's not incur extra cost * by reopening directory on telldir()/seekdir() combination. Note: A special * case of telldir()/seekdir() to offset 0 will behave same as rewinddir(). */ if (!file->f_pos) { FILE_GET_FI_P(file)->isStale = TRUE; } out: compat_mutex_unlock(mtx); return offset; } /* *---------------------------------------------------------------------- * * HgfsDirOpen -- * * Called whenever a process opens a directory in our filesystem. * * We send a "Search Open" request to the server with the name * stored in this file's inode. If the Open succeeds, we store the * search handle sent by the server in the file struct so it can be * accessed by readdir and close. * * Results: * Returns zero if on success, error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsDirOpen(struct inode *inode, // IN: Inode of the dir to open struct file *file) // IN: File pointer for this open { int result; HgfsHandle handle; ASSERT(inode); ASSERT(inode->i_sb); ASSERT(file); result = HgfsPrivateDirOpen(file, &handle); if (!result) { result = HgfsCreateFileInfo(file, handle); } return result; } /* *---------------------------------------------------------------------- * * HgfsReaddirRefreshEntries -- * * refresh the file entries if the handle is stale by reopening. * * Results: * Zero on success, otherwise failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsReaddirRefreshEntries(struct file *file) // IN: File pointer for this open { int result = 0; /* * rm -rf 6.10+ breaks because it does following: * an 'fd = open()' on a directory, followed by unlinkat() * which removes an entry from the directory it and then * fdopendir(fd). We get a call on open() but not on fdopendir(), * which means that we do not reflect the action of unlinkat(), * and thus rm -rf gets confused and marking entry as unremovable. * Note that this problem exists because hgfsServer reads all * the directory entries at open(). Interested reader may look at * coreutils/src/remove.c file. * * So as a workaround, we ask the server to populate entries on * first readdir() call rather than opendir(). This effect is * achieved by closing and reopening the directory. Grrr! * * XXX We should get rid of this code when/if we remove the above * behavior from hgfsServer. */ if (FILE_GET_FI_P(file)->isStale) { result = HgfsPrivateDirReOpen(file); } LOG(6, (KERN_DEBUG "VMware hgfs: %s: error: stale handle (%s) return %d)\n", __func__, file->f_dentry->d_name.name, result)); return result; } /* *---------------------------------------------------------------------- * * HgfsGetFileInode -- * * Get file inode from the hgfs attributes or generate from the super block. * * Results: * The inode entry. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static ino_t HgfsGetFileInode(HgfsAttrInfo const *attr, // IN: Attrs to use struct super_block *sb) // IN: Superblock of this fs { ino_t inodeEntry; uint64 tempIno; HgfsSuperInfo *si; ASSERT(attr); ASSERT(sb); si = HGFS_SB_TO_COMMON(sb); if ((si->mntFlags & HGFS_MNT_SERVER_INUM) != 0 && (attr->mask & HGFS_ATTR_VALID_FILEID) != 0) { tempIno = attr->hostFileId; } else { tempIno = iunique(sb, HGFS_RESERVED_INO); } inodeEntry = HgfsUniqueidToIno(tempIno); LOG(4, (KERN_DEBUG "VMware hgfs: %s: return %lu\n", __func__, inodeEntry)); return inodeEntry; } /* *---------------------------------------------------------------------- * * HgfsGetFileType -- * * Get file type according to the hgfs attributes. * * Results: * The file type. * * Side effects: * None * *---------------------------------------------------------------------- */ static uint32 HgfsGetFileType(HgfsAttrInfo const *attr) // IN: Attrs to use { uint32 type; ASSERT(attr); switch (attr->type) { case HGFS_FILE_TYPE_SYMLINK: type = DT_LNK; break; case HGFS_FILE_TYPE_REGULAR: type = DT_REG; break; case HGFS_FILE_TYPE_DIRECTORY: type = DT_DIR; break; default: /* * XXX Should never happen. I'd put NOT_IMPLEMENTED() here * but if the driver ever goes in the host it's probably not * a good idea for an attacker to be able to hang the host * simply by using a bogus file type in a reply. [bac] * * Well it happens! Refer bug 548177 for details. In short, * when the user deletes a share, we hit this code path. * */ type = DT_UNKNOWN; break; } LOG(4, (KERN_DEBUG "VMware hgfs: %s: return %d\n", __func__, type)); return type; } /* *---------------------------------------------------------------------- * * HgfsReaddirNextEntry -- * * Called whenever a process opens a directory in our filesystem. * * We send a "Search Open" request to the server with the name * stored in this file's inode. 
If the Open succeeds, we store the * search handle sent by the server in the file struct so it can be * accessed by readdir and close. * * Results: * Returns zero if on success, error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsReaddirNextEntry(struct file *file, // IN: file loff_t entryPos, // IN: position Bool dotAndDotDotIgnore, // IN: ignore "." and ".." size_t entryNameBufLen, // IN: name buffer length char *entryName, // OUT: entry name uint32 *entryNameLength, // OUT: max name length ino_t *entryIno, // OUT: inode entry number uint32 *entryType, // OUT: entry type Bool *entryIgnore, // OUT: ignore this entry or not Bool *entryEnd) // OUT: no more entries { HgfsSuperInfo *si; HgfsAttrInfo entryAttrs; char *fileName = NULL; int result; ASSERT(file->f_dentry->d_inode->i_sb); si = HGFS_SB_TO_COMMON(file->f_dentry->d_inode->i_sb); *entryIgnore = FALSE; /* * Nonzero result = we failed to get valid reply from server. * Zero result: * - done == TRUE means we hit the end of the directory * - Otherwise, fileName has the name of the next dirent * */ result = HgfsGetNextDirEntry(si, FILE_GET_FI_P(file)->handle, (uint32)entryPos, &entryAttrs, &fileName, entryEnd); if (result == -ENAMETOOLONG) { /* * Skip dentry if its name is too long (see below). * * XXX: If a bad server sends us bad packets, we can loop here * forever, as I did while testing *grumble*. Maybe we should error * in that case. */ LOG(4, (KERN_DEBUG "VMware hgfs: %s: error getnextdentry name %d\n", __func__, result)); *entryIgnore = TRUE; result = 0; goto exit; } else if (result) { /* Error */ LOG(4, (KERN_DEBUG "VMware hgfs: %s: error getnextdentry %d\n", __func__, result)); goto exit; } if (*entryEnd) { LOG(10, (KERN_DEBUG "VMware hgfs: %s: end of dir reached\n", __func__)); goto exit; } /* * Escape all non-printable characters (which for linux is just * "/"). 
* * Note that normally we would first need to convert from the * CP name format, but that is done implicitely here since we * are guaranteed to have just one path component per dentry. */ result = HgfsEscape_Do(fileName, strlen(fileName), entryNameBufLen, entryName); kfree(fileName); fileName = NULL; /* * Check the filename length. * * If the name is too long to be represented in linux, we simply * skip it (i.e., that file is not visible to our filesystem). * * HgfsEscape_Do returns a negative value if the escaped * output didn't fit in the specified output size, so we can * just check its return value. */ if (result < 0) { /* * XXX: Another area where a bad server could cause us to loop * forever. */ *entryIgnore = TRUE; result = 0; goto exit; } *entryNameLength = result; result = 0; /* * It is unfortunate, but the HGFS server sends back '.' and ".." * when we do a SearchRead. In an ideal world, these would be faked * on the client, but it would be a real backwards-compatibility * hassle to change the behavior at this point. * * So instead, we'll take the '.' and ".." and modify their inode * numbers so they match what the client expects. */ if (!strncmp(entryName, ".", sizeof ".")) { if (!dotAndDotDotIgnore) { *entryIno = file->f_dentry->d_inode->i_ino; } else { *entryIgnore = TRUE; } } else if (!strncmp(entryName, "..", sizeof "..")) { if (!dotAndDotDotIgnore) { *entryIno = compat_parent_ino(file->f_dentry); } else { *entryIgnore = TRUE; } } else { *entryIno = HgfsGetFileInode(&entryAttrs, file->f_dentry->d_inode->i_sb); } if (*entryIgnore) { goto exit; } /* Assign the correct dentry type. */ *entryType = HgfsGetFileType(&entryAttrs); exit: return result; } /* *---------------------------------------------------------------------- * * HgfsDoReaddir -- * * Handle a readdir request. See details below if interested. * * Readdir is a bit complicated, and is best understood by reading * the code. 
For the impatient, here is an overview of the major * moving parts [bac]: * * - Getdents syscall calls readdir, which is supposed to call * filldir some number of times. * - Each time it's called, filldir updates a struct with the * number of bytes copied thus far, and sets an error code if * appropriate. * - When readdir returns, getdents checks the struct to see if * any dentries were copied, and if so returns the byte count. * Otherwise, it returns the error from the struct (which should * still be zero if filldir was never called). * * A consequence of this last fact is that if there are no more * dentries, then readdir should NOT call filldir, and should * return from readdir with a non-error. * * Other notes: * * - Passing an inum of zero to filldir doesn't work. At a minimum, * you have to make up a bogus inum for each dentry. * - Passing the correct entryType to filldir seems to be non-critical; * apparently most programs (such as ls) stat each file if they * really want to know what type it is. However, passing the * correct type means that ls doesn't bother calling stat on * directories, and that saves an entire round trip per dirctory * dentry. * * Results: * Returns zero if on success, negative error on failure. * (According to /fs/readdir.c, any non-negative return value * means it succeeded). * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsDoReaddir(struct file *file, // IN: Bool dotAndDotDotIgnore, // IN: ignore "." and ".." 
filldir_t filldirCb, // IN: system filler callback void *filldirCtx, // IN/OUT: system filler context loff_t *fillPos, // IN/OUT: fill entry position loff_t *currentPos) // IN/OUT: current position { char *entryName = NULL; // buf for escaped version of name size_t entryNameBufLen = NAME_MAX + 1; int entryNameLength = 0; int result = 0; Bool entryEnd = FALSE; ASSERT(file); ASSERT(filldirCtx); if (!file || !(file->f_dentry) || !(file->f_dentry->d_inode)) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsReaddir: null input\n")); return -EFAULT; } LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s, inum %lu, pos %Lu)\n", __func__, file->f_dentry->d_name.name, file->f_dentry->d_inode->i_ino, *currentPos)); /* * Refresh entries if required. See rm -rf 6.10+ breaking issue. */ result = HgfsReaddirRefreshEntries(file); if (result != 0) { return result; } /* * Some day when we're out of things to do we can move this to a slab * allocator. */ entryName = kmalloc(entryNameBufLen, GFP_KERNEL); if (entryName == NULL) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsReaddir: out of memory allocating " "escaped name buffer\n")); return -ENOMEM; } while (!entryEnd) { Bool entryIgnore; ino_t entryIno = 0; uint32 entryType = DT_UNKNOWN; result = HgfsReaddirNextEntry(file, *currentPos, dotAndDotDotIgnore, entryNameBufLen, entryName, &entryNameLength, &entryIno, &entryType, &entryIgnore, &entryEnd); if (result != 0) { /* An error occurred retrieving the entry, so exit. */ break; } if (entryEnd) { LOG(10, (KERN_DEBUG "VMware hgfs: %s: end of dir reached\n", __func__)); continue; } if (entryIgnore) { *currentPos += 1; continue; } /* * Call the HGFS wrapper to the system fill function to set this dentry. 
*/ LOG(6, (KERN_DEBUG "VMware hgfs: %s: dir_emit(%s, %u, @ (fill %Lu HGFS %Lu)\n", __func__, entryName, entryNameLength, *fillPos, *currentPos)); if (!HgfsReaddirFillEntry(filldirCb, /* filldir callback function */ filldirCtx, /* filldir callback struct */ entryName, /* name of dirent */ entryNameLength, /* length of name */ *fillPos, /* fill entry position */ entryIno, /* inode number (0 makes it not show) */ entryType)) { /* type of dirent */ /* * This means that dir_emit ran out of room in the user buffer * it was copying into; we just break out and return, but * don't increment f_pos. So the next time the user calls * getdents, this dentry will be requested again, will get * retrieved again, and get copied properly to the user. */ result = 0; break; } *currentPos += 1; *fillPos += 1; } LOG(6, (KERN_DEBUG "VMware hgfs: %s: return\n",__func__)); kfree(entryName); return 0; } #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) /* *---------------------------------------------------------------------- * * HgfsReaddir -- * * Handle a readdir request. * * Results: * Returns zero on success, or an error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsReaddir(struct file *file, // IN: struct dir_context *ctx) // IN: { HgfsFileInfo *fInfo = FILE_GET_FI_P(file); if (0 == ctx->pos) { fInfo->direntPos = 0; } /* If either dot and dotdot are filled in for us we can exit. */ if (!dir_emit_dots(file, ctx)) { LOG(6, (KERN_DEBUG "VMware hgfs: %s: dir_emit_dots(%s, @ %Lu)\n", __func__, file->f_dentry->d_name.name, ctx->pos)); return 0; } /* It is sufficient to pass the context as it contains the filler function. */ return HgfsDoReaddir(file, TRUE, NULL, ctx, &ctx->pos, &fInfo->direntPos); } /* *---------------------------------------------------------------------- * * HgfsReaddirFillEntry -- * * Fill a readdir entry. * * Failure means that fill ran out of room in the user buffer * it was copying into. 
* * Results: * Returns TRUE on success, or FALSE on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsReaddirFillEntry(filldir_t filldirCb, // IN: System filler callback void *filldirCtx, // IN/OUT: System filler context char *entryName, // IN: entry name uint32 entryNameLength, // IN: max name length loff_t entryPos, // IN: position = (ctx-pos) ino_t entryIno, // IN: inode entry number uint32 entryType) // IN: entry type { struct dir_context *ctx = filldirCtx; Bool result; ASSERT(filldirCb == NULL); /* Contained within the context structure. */ ASSERT(ctx != NULL); ASSERT(ctx->pos == entryPos); ASSERT(entryName != NULL); ASSERT(entryNameLength != 0); LOG(6, (KERN_DEBUG "VMware hgfs: %s: dir_emit(%s, %u, %Lu)\n", __func__, entryName, entryNameLength, ctx->pos)); result = dir_emit(ctx, /* filldir callback struct */ entryName, /* name of dirent */ entryNameLength, /* length of name */ entryIno, /* inode number (0 makes it not show) */ entryType); /* type of dirent */ return result; } #else /* *---------------------------------------------------------------------- * * HgfsReaddir -- * * Handle a readdir request. * * Results: * Returns zero on success, or an error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsReaddir(struct file *file, // IN: Directory to read from void *dirent, // OUT: Buffer to copy dentries into filldir_t filldir) // IN: Filler function { HgfsFileInfo *fInfo = FILE_GET_FI_P(file); if (0 == file->f_pos) { fInfo->direntPos = 0; } return HgfsDoReaddir(file, FALSE, filldir, dirent, &file->f_pos, &fInfo->direntPos); } /* *---------------------------------------------------------------------- * * HgfsReaddirFillEntry -- * * Fill a readdir entry. * * Failure means that fill ran out of room in the user buffer * it was copying into. * * Results: * Returns TRUE on success, or FALSE on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsReaddirFillEntry(filldir_t filldirCb, // IN: System filler callback void *filldirCtx, // IN/OUT: System filler context char *entryName, // IN: entry name uint32 entryNameLength, // IN: max name length loff_t entryPos, // IN: position ino_t entryIno, // IN: inode entry number uint32 entryType) // IN: entry type { Bool result = TRUE; int fillResult; ASSERT(filldirCb != NULL); ASSERT(filldirCtx != NULL); ASSERT(entryName != NULL); ASSERT(entryNameLength != 0); LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling filldir(%s, %u, %Lu\n", __func__, entryName, entryNameLength, entryPos)); fillResult = filldirCb(filldirCtx, /* filldir callback struct */ entryName, /* name of dirent */ entryNameLength, /* length of name */ entryPos, /* offset of dirent */ entryIno, /* inode number (0 makes it not show) */ entryType); /* type of dirent */ if (fillResult != 0) { result = FALSE; } LOG(6, (KERN_DEBUG "VMware hgfs: %s: return %d\n", __func__, result)); return result; } #endif /* *---------------------------------------------------------------------- * * HgfsDirRelease -- * * Called when the last reader of a directory closes it, i.e. when * the directory's file f_count field becomes zero. * * Results: * Returns zero on success, or an error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsDirRelease(struct inode *inode, // IN: Inode that the file* points to struct file *file) // IN: File for the dir getting released { HgfsHandle handle; ASSERT(inode); ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_sb); handle = FILE_GET_FI_P(file)->handle; HgfsReleaseFileInfo(file); return HgfsPrivateDirRelease(file, handle); } vmhgfs-only/page.c 0000444 0000000 0000000 00000200120 13432725306 013103 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * page.c -- * * Address space operations for the filesystem portion of the vmhgfs driver. */ /* Must come before any kernel header file. 
*/ #include "driver-config.h" #include <linux/pagemap.h> #include "compat_mm.h" #include "compat_page-flags.h" #include "compat_fs.h" #include "compat_kernel.h" #include "compat_pagemap.h" #include "compat_highmem.h" #include <linux/writeback.h> #include "cpName.h" #include "hgfsProto.h" #include "module.h" #include "request.h" #include "hgfsUtil.h" #include "fsutil.h" #include "inode.h" #include "vm_assert.h" #include "vm_basic_types.h" #include "vm_basic_defs.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) #define HGFS_SM_MB_BEFORE smp_mb__before_atomic #define HGFS_SM_MB_AFTER smp_mb__after_atomic #else /* * Fedora 21 backported some of the atomic primitives so * we test if they are defined and use them otherwise fallback * to the older variants. */ #ifdef smp_mb__before_atomic #define HGFS_SM_MB_BEFORE smp_mb__before_atomic #else #define HGFS_SM_MB_BEFORE smp_mb__before_clear_bit #endif #ifdef smp_mb__after_atomic #define HGFS_SM_MB_AFTER smp_mb__after_atomic #else #define HGFS_SM_MB_AFTER smp_mb__after_clear_bit #endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) #define HGFS_PAGE_FILE_INDEX(page) page_file_index(page) #else #define HGFS_PAGE_FILE_INDEX(page) ((page)->index) #endif /* Private functions. 
*/ static int HgfsDoWrite(HgfsHandle handle, HgfsDataPacket dataPacket[], uint32 numEntries, loff_t offset); static int HgfsDoRead(HgfsHandle handle, HgfsDataPacket dataPacket[], uint32 numEntries, loff_t offset); static int HgfsDoReadpage(HgfsHandle handle, struct page *page, unsigned pageFrom, unsigned pageTo); static int HgfsDoWritepage(HgfsHandle handle, struct page *page, unsigned pageFrom, unsigned pageTo); static int HgfsDoWriteBegin(struct file *file, struct page *page, unsigned pageFrom, unsigned pageTo, Bool canRetry, Bool *doRetry); static int HgfsDoWriteEnd(struct file *file, struct page *page, unsigned pageFrom, unsigned pageTo, loff_t writeTo, unsigned copied); static void HgfsDoExtendFile(struct inode *inode, loff_t writeTo); /* HGFS address space operations. */ static int HgfsReadpage(struct file *file, struct page *page); static int HgfsWritepage(struct page *page, struct writeback_control *wbc); /* * Write aop interface has changed in 2.6.28. Specifically, * the page locking semantics and requirement to handle * short writes. We already handle short writes, so no major * changes needed. write_begin is expected to return a locked * page and write_end is expected to unlock the page and drop * the reference before returning. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) static int HgfsWriteBegin(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned flags, struct page **page, void **clientData); static int HgfsWriteEnd(struct file *file, struct address_space *mapping, loff_t pos, unsigned len, unsigned copied, struct page *page, void *clientData); #else static int HgfsPrepareWrite(struct file *file, struct page *page, unsigned pageFrom, unsigned pageTo); static int HgfsCommitWrite(struct file *file, struct page *page, unsigned pageFrom, unsigned pageTo); #endif /* HGFS address space operations structure. 
*/ struct address_space_operations HgfsAddressSpaceOperations = { .readpage = HgfsReadpage, .writepage = HgfsWritepage, #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) .write_begin = HgfsWriteBegin, .write_end = HgfsWriteEnd, #else .prepare_write = HgfsPrepareWrite, .commit_write = HgfsCommitWrite, #endif .set_page_dirty = __set_page_dirty_nobuffers, }; enum { PG_BUSY = 0, }; typedef struct HgfsWbPage { struct list_head wb_list; /* Defines state of page: */ struct page *wb_page; /* page to read in/write out */ pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ struct kref wb_kref; /* reference count */ unsigned long wb_flags; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) wait_queue_head_t wb_queue; #endif } HgfsWbPage; static void HgfsInodePageWbAdd(struct inode *inode, struct page *page); static void HgfsInodePageWbRemove(struct inode *inode, struct page *page); static Bool HgfsInodePageWbFind(struct inode *inode, struct page *page); static void HgfsWbRequestDestroy(HgfsWbPage *req); static Bool HgfsCheckReadModifyWrite(struct file *file, struct page *page, unsigned int pageFrom, unsigned int pageTo); /* * Private functions. */ /* *----------------------------------------------------------------------------- * * HgfsDoRead -- * * Do one read request. Called by HgfsReadpage, possibly multiple times * if the size of the read is too big to be handled by one server request. * * We send a "Read" request to the server with the given handle. * * It is assumed that this function is never called with a larger read than * what can be sent in one request. * * HgfsDataPacket is an array of pages into which data will be read. * * Results: * Returns the number of bytes read on success, or an error on failure. * * Side effects: * None. 
* *---------------------------------------------------------------------------- */ static int HgfsDoRead(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN/OUT: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset at which to read { HgfsReq *req; HgfsOp opUsed; int result = 0; uint32 actualSize = 0; char *payload = NULL; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionRead; if (opUsed == HGFS_OP_READ_FAST_V4) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = count; request->reserved = 0; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; LOG(4, (KERN_WARNING "VMware hgfs: Fast Read V4\n")); } else if (opUsed == HGFS_OP_READ_V3) { HgfsRequest *header; HgfsRequestReadV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestReadV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request - sizeof *header, count); request->reserved = 0; req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestRead *request; request = (HgfsRequestRead *)(HGFS_REQ_PAYLOAD(req)); 
request->header.id = req->id; request->header.op = opUsed; request->file = handle; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); req->dataPacket = NULL; req->numEntries = 0; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: if (opUsed == HGFS_OP_READ_FAST_V4) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else if (opUsed == HGFS_OP_READ_V3) { actualSize = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; payload = ((HgfsReplyReadV3 *)HGFS_REP_PAYLOAD_V3(req))->payload; } else { actualSize = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->actualSize; payload = ((HgfsReplyRead *)HGFS_REQ_PAYLOAD(req))->payload; } /* Sanity check on read size. */ if (actualSize > count) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: read too big!\n")); result = -EPROTO; goto out; } result = actualSize; if (actualSize == 0) { /* We got no bytes. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: server returned " "zero\n")); goto out; } /* Return result. */ if (opUsed == HGFS_OP_READ_V3 || opUsed == HGFS_OP_READ) { buf = kmap(dataPacket[0].page) + dataPacket[0].offset; ASSERT(buf); memcpy(buf, payload, actualSize); LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoRead: copied %u\n", actualSize)); kunmap(dataPacket[0].page); } break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_READ_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Fast Read V4 not " "supported. Falling back to V3 Read.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionRead = HGFS_OP_READ_V3; goto retry; case HGFS_OP_READ_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionRead = HGFS_OP_READ; goto retry; default: break; } break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoRead: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; } /* *----------------------------------------------------------------------------- * * HgfsDoWrite -- * * Do one write request. Called by HgfsDoWritepage, possibly multiple * times if the size of the write is too big to be handled by one server * request. * * We send a "Write" request to the server with the given handle. * * It is assumed that this function is never called with a larger write * than what can be sent in one request. * * HgfsDataPacket is an array of pages from which data will be written * to file. * * Results: * Returns the number of bytes written on success, or an error on failure. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static int HgfsDoWrite(HgfsHandle handle, // IN: Handle for this file HgfsDataPacket dataPacket[], // IN: Data description uint32 numEntries, // IN: Number of entries in dataPacket loff_t offset) // IN: Offset to begin writing at { HgfsReq *req; int result = 0; HgfsOp opUsed; uint32 requiredSize = 0; uint32 actualSize = 0; char *payload = NULL; uint32 reqSize; HgfsStatus replyStatus; char *buf; uint32 count; ASSERT(numEntries == 1); count = dataPacket[0].len; req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionWrite; if (opUsed == HGFS_OP_WRITE_FAST_V4) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = count; request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; req->dataPacket = kmalloc(numEntries * sizeof req->dataPacket[0], GFP_KERNEL); if (!req->dataPacket) { LOG(4, (KERN_WARNING "%s: Failed to allocate mem\n", __func__)); result = -ENOMEM; goto out; } memcpy(req->dataPacket, dataPacket, numEntries * sizeof req->dataPacket[0]); req->numEntries = numEntries; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->payloadSize = reqSize; LOG(4, (KERN_WARNING "VMware hgfs: Fast Write V4\n")); } else if (opUsed == HGFS_OP_WRITE_V3) { HgfsRequest *header; HgfsRequestWriteV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestWriteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *header - sizeof 
*request, count); LOG(4, (KERN_WARNING "VMware hgfs: Using write V3\n")); request->reserved = 0; payload = request->payload; requiredSize = request->requiredSize; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } else { HgfsRequestWrite *request; request = (HgfsRequestWrite *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; request->flags = 0; request->offset = offset; request->requiredSize = MIN(req->bufferSize - sizeof *request, count); payload = request->payload; requiredSize = request->requiredSize; reqSize = sizeof *request; req->dataPacket = NULL; req->numEntries = 0; buf = kmap(dataPacket[0].page) + dataPacket[0].offset; memcpy(payload, buf, requiredSize); kunmap(dataPacket[0].page); req->payloadSize = reqSize + requiredSize - 1; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: res %u\n", result)); switch (result) { case 0: if (opUsed == HGFS_OP_WRITE_V3 || opUsed == HGFS_OP_WRITE_FAST_V4) { actualSize = ((HgfsReplyWriteV3 *)HGFS_REP_PAYLOAD_V3(req))->actualSize; } else { actualSize = ((HgfsReplyWrite *)HGFS_REQ_PAYLOAD(req))->actualSize; } /* Return result. */ LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoWrite: wrote %u bytes\n", actualSize)); result = actualSize; break; case -EPROTO: /* Retry with older version(s). Set globally. */ switch (opUsed) { case HGFS_OP_WRITE_FAST_V4: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Fast Write V4 not " "supported. 
Falling back to V3 write.\n")); if (req->dataPacket) { kfree(req->dataPacket); req->dataPacket = NULL; } hgfsVersionWrite = HGFS_OP_WRITE_V3; goto retry; case HGFS_OP_WRITE_V3: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionWrite = HGFS_OP_WRITE; goto retry; default: break; } break; default: LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); break; } } else if (result == -EIO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoWrite: unknown error: " "%d\n", result)); } out: if (req->dataPacket) { kfree(req->dataPacket); } HgfsFreeRequest(req); return result; } /* *----------------------------------------------------------------------------- * * HgfsDoReadpage -- * * Reads in a single page, using the specified handle and page offsets. * At the time of writing, HGFS_IO_MAX == PAGE_CACHE_SIZE, so we could * avoid the do {} while() and just read the page as is, but in case the * above assumption is ever broken, it's nice that this will continue to * "just work". * * Results: * Zero on success, non-zero on error. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static int HgfsDoReadpage(HgfsHandle handle, // IN: Handle to use for reading struct page *page, // IN/OUT: Page to read into unsigned pageFrom, // IN: Where to start reading to unsigned pageTo) // IN: Where to stop reading { int result = 0; loff_t curOffset = ((loff_t)HGFS_PAGE_FILE_INDEX(page) << PAGE_CACHE_SHIFT) + pageFrom; size_t nextCount, remainingCount = pageTo - pageFrom; HgfsDataPacket dataPacket[1]; LOG(6, (KERN_WARNING "VMware hgfs: HgfsDoReadpage: read %Zu bytes from fh %u " "at offset %Lu\n", remainingCount, handle, curOffset)); /* * Call HgfsDoRead repeatedly until either * - HgfsDoRead returns an error, or * - HgfsDoRead returns 0 (end of file), or * - We have read the requested number of bytes. */ do { nextCount = (remainingCount > HGFS_IO_MAX) ? HGFS_IO_MAX : remainingCount; dataPacket[0].page = page; dataPacket[0].offset = pageFrom; dataPacket[0].len = nextCount; result = HgfsDoRead(handle, dataPacket, 1, curOffset); if (result < 0) { LOG(4, (KERN_WARNING "VMware hgfs: HgfsDoReadpage: read error %d\n", result)); goto out; } remainingCount -= result; curOffset += result; pageFrom += result; } while ((result > 0) && (remainingCount > 0)); /* * It's possible that despite being asked to read a full page, there is less * than a page in the file from this offset, so we should zero the rest of * the page's memory. */ if (remainingCount) { char *buffer = kmap(page) + pageTo; LOG(6, (KERN_DEBUG "VMware hgfs: %s: zeroing last %Zu bytes\n", __func__, remainingCount)); memset(buffer - remainingCount, 0, remainingCount); kunmap(page); } /* * We read a full page (or all of the page that actually belongs to the * file), so mark it up to date. Also, flush the old page data from the data * cache. 
*/ flush_dcache_page(page); SetPageUptodate(page); result = 0; out: compat_unlock_page(page); return result; } /* *----------------------------------------------------------------------------- * * HgfsDoWritepageInt -- * * Writes out a single page, using the specified handle and page offsets. * At the time of writing, HGFS_IO_MAX == PAGE_CACHE_SIZE, so we could * avoid the do {} while() and just write the page as is, but in case the * above assumption is ever broken, it's nice that this will continue to * "just work". * * Results: * Number of bytes copied on success, negative error on error. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int HgfsDoWritepageInt(HgfsHandle handle, // IN: Handle to use for writing struct page *page, // IN: Page containing data to write unsigned pageFrom, // IN: Beginning page offset unsigned pageTo) // IN: Ending page offset { int result = 0; loff_t curOffset = ((loff_t)HGFS_PAGE_FILE_INDEX(page) << PAGE_CACHE_SHIFT) + pageFrom; size_t nextCount; size_t remainingCount = pageTo - pageFrom; struct inode *inode; HgfsDataPacket dataPacket[1]; ASSERT(page->mapping); ASSERT(page->mapping->host); inode = page->mapping->host; LOG(4, (KERN_WARNING "VMware hgfs: %s: start writes at %Lu\n", __func__, curOffset)); /* * Call HgfsDoWrite repeatedly until either * - HgfsDoWrite returns an error, or * - HgfsDoWrite returns 0 (XXX this probably rarely happens), or * - We have written the requested number of bytes. */ do { nextCount = (remainingCount > HGFS_IO_MAX) ? HGFS_IO_MAX : remainingCount; dataPacket[0].page = page; dataPacket[0].offset = pageFrom; dataPacket[0].len = nextCount; result = HgfsDoWrite(handle, dataPacket, 1, curOffset); if (result < 0) { LOG(4, (KERN_WARNING "VMware hgfs: %s: write error %d\n", __func__, result)); goto exit; } remainingCount -= result; curOffset += result; pageFrom += result; /* Update the inode's size now rather than waiting for a revalidate. 
*/ HgfsDoExtendFile(inode, curOffset); } while ((result > 0) && (remainingCount > 0)); exit: if (result >= 0) { result = pageTo - pageFrom - remainingCount; } return result; } /* *----------------------------------------------------------------------------- * * HgfsDoWritepage -- * * Writes out a single page, using the specified handle and page offsets. * At the time of writing, HGFS_IO_MAX == PAGE_CACHE_SIZE, so we could * avoid the do {} while() and just write the page as is, but in case the * above assumption is ever broken, it's nice that this will continue to * "just work". * * A quick note about appending to files. Before HGFS used the page cache, * an HgfsWrite examined a file's f_flags and added HGFS_WRITE_APPEND to * the write packet if the file was opened with O_APPEND. This causes the * server to reopen the fd with O_APPEND so that writes will append to the * end. * * In the page cache world, this won't work because we may have arrived at * this function via writepage(), which doesn't give us a particular file * and thus we don't know if we should be appending or not. In fact, the * generic write path employed by the page cache handles files with O_APPEND * set by moving the file offset to the result of i_size_read(). So we * shouldn't ever need to set HGFS_WRITE_APPEND, as now we will handle all * write appends, instead of telling the server to do it for us. * * Results: * Zero on success, non-zero on error. * * Side effects: * None. 
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsDoWritepage(HgfsHandle handle,  // IN: Handle to use for writing
                struct page *page,  // IN: Page containing data to write
                unsigned pageFrom,  // IN: Beginning page offset
                unsigned pageTo)    // IN: Ending page offset
{
   int result = 0;

   LOG(4, (KERN_WARNING "VMware hgfs: %s: start writes at %u to %u\n",
           __func__, pageFrom, pageTo));

   result = HgfsDoWritepageInt(handle, page, pageFrom, pageTo);
   if (result < 0) {
      goto exit;
   }

   /* The data reached the server: stop tracking this page for write-back. */
   HgfsInodePageWbRemove(page->mapping->host, page);

   /* Callers expect 0 on success, not a byte count. */
   result = 0;
   SetPageUptodate(page);

exit:
   LOG(4, (KERN_WARNING "VMware hgfs: %s: return %d\n", __func__, result));
   return result;
}


/*
 * HGFS address space operations.
 */

/*
 *-----------------------------------------------------------------------------
 *
 * HgfsReadpage --
 *
 *    Read a page from an open file. Like HgfsWritepage, there are some
 *    complicated locking rules governing this function. The page arrives from
 *    the VFS locked, and we must unlock it before exiting. In addition, we
 *    must acquire a reference to the page before mapping it, and we must
 *    flush the page's data from the data cache (not to be confused with
 *    dcache i.e. the dentry cache).
 *
 * Results:
 *    Zero on success, non-zero on error.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsReadpage(struct file *file,  // IN: File to read from
             struct page *page)  // IN/OUT: Page to write to
{
   int result = 0;
   HgfsHandle handle;

   ASSERT(file);
   ASSERT(file->f_dentry);
   ASSERT(file->f_dentry->d_inode);
   ASSERT(page);

   handle = FILE_GET_FI_P(file)->handle;
   LOG(6, (KERN_WARNING "VMware hgfs: %s: reading from handle %u\n",
           __func__, handle));

   /* Hold a reference across the read; HgfsDoReadpage unlocks the page. */
   page_cache_get(page);
   result = HgfsDoReadpage(handle, page, 0, PAGE_CACHE_SIZE);
   page_cache_release(page);
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsWritepage --
 *
 *    The "spontaneous" way to write a page, called when the kernel is under
 *    memory pressure or is asked to sync a memory mapped file. Because
 *    writepage() can be called from so many different places, we don't get a
 *    filp with which to write, and we have to be very careful about races and
 *    locking.
 *
 * Results:
 *    Zero on success, non-zero on error.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsWritepage(struct page *page,               // IN: Page to write from
              struct writeback_control *wbc)   // IN: Ignored
{
   struct inode *inode;
   HgfsHandle handle;
   int result;
   pgoff_t lastPageIndex;
   pgoff_t pageIndex;
   loff_t currentFileSize;
   unsigned to = PAGE_CACHE_SIZE;

   ASSERT(page);
   ASSERT(page->mapping);
   ASSERT(page->mapping->host);
   inode = page->mapping->host;

   /* We need a writable file handle. */
   result = HgfsGetHandle(inode, HGFS_OPEN_MODE_WRITE_ONLY + 1, &handle);
   if (result) {
      LOG(4, (KERN_WARNING "VMware hgfs: HgfsWritepage: could not get writable "
              "file handle\n"));
      goto exit;
   }

   /*
    * We were given an entire page to write. In most cases this means "start
    * writing from the beginning of the page (byte 0) to the very end (byte
    * PAGE_CACHE_SIZE). But what if this is the last page of the file? Then
    * we don't want to write a full PAGE_CACHE_SIZE bytes, but just however
    * many bytes may remain in the page.
    *
    * XXX: Other filesystems check the page index to make sure that the page
    * we're being asked to write is within the size of the file. I guess
    * that's because writepage() can race with truncate(), and if we find
    * ourselves here after a truncate(), we can drop the write.
    */
   currentFileSize = compat_i_size_read(inode);
   lastPageIndex = currentFileSize >> PAGE_CACHE_SHIFT;
   pageIndex = HGFS_PAGE_FILE_INDEX(page);
   LOG(4, (KERN_WARNING "VMware hgfs: %s: file size lpi %lu pi %lu\n",
           __func__, lastPageIndex, pageIndex));
   if (pageIndex > lastPageIndex) {
      /* Page is entirely beyond EOF (e.g. after truncate): drop the write. */
      goto exit;
   } else if (pageIndex == lastPageIndex) {
      to = currentFileSize & (PAGE_CACHE_SIZE - 1);
      if (to == 0) {
         goto exit;
      }
   }

   /*
    * This part is fairly intricate, so it deserves some explanation. We're
    * really interested in calling HgfsDoWritepage with our page and
    * handle, without having to then worry about locks or references. See
    * Documentation/filesystems/Locking in the kernel to see what rules we
    * must obey.
    *
    * Firstly, we acquire a reference to the page via page_cache_get() and
    * call compat_set_page_writeback(). The latter does a number of things:
    * it sets the writeback bit on the page, and if it wasn't already set,
    * it sets the writeback bit in the radix tree. Then, if the page isn't
    * dirty, it clears the dirty bit in the radix tree. The end result is
    * that the radix tree's notion of dirty and writeback is fully synced
    * with the page itself.
    *
    * Secondly, we write the page itself.
    *
    * Thirdly, we end writeback of the page via
    * compat_end_page_writeback(), and release our reference on the page.
    *
    * Finally, we unlock the page, waking up its waiters and making it
    * available to anyone else. Note that this step must be performed
    * regardless of whether we wrote anything, as the VFS locked the page
    * for us.
    */
   page_cache_get(page);
   compat_set_page_writeback(page);
   result = HgfsDoWritepage(handle, page, 0, to);
   compat_end_page_writeback(page);
   page_cache_release(page);

exit:
   compat_unlock_page(page);
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsDoWriteBegin --
 *
 *    Helper function for HgfsWriteBegin / HgfsPrepareWrite.
 *
 *    Initialize the page if the file is to be appended.
 *
 * Results:
 *    Zero on success, always.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsDoWriteBegin(struct file *file,  // IN: File to be written
                 struct page *page,  // IN: Page to be written
                 unsigned pageFrom,  // IN: Starting page offset
                 unsigned pageTo,    // IN: Ending page offset
                 Bool canRetry,      // IN: can we retry write
                 Bool *doRetry)      // OUT: set to retry if necessary
{
   ASSERT(page);

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: off %Lu: %u to %u\n", __func__,
           (loff_t)HGFS_PAGE_FILE_INDEX(page) << PAGE_CACHE_SHIFT,
           pageFrom, pageTo));

   if (canRetry && HgfsCheckReadModifyWrite(file, page, pageFrom, pageTo)) {
      HgfsHandle readHandle;
      int result;

      result = HgfsGetHandle(page->mapping->host,
                             HGFS_OPEN_MODE_READ_ONLY + 1,
                             &readHandle);
      if (result == 0) {
         /*
          * We have a partial page write and thus require the non-written part
          * if the page is to contain valid data.
          * A read of the page of the valid file data will set the page up to
          * date.  If it fails the page will not be set up to date and the
          * write end will write the data out immediately (synchronously
          * effectively).
          */
         result = HgfsDoReadpage(readHandle, page, 0, PAGE_CACHE_SIZE);
         *doRetry = TRUE;
      }
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: HgfsReadpage result %d\n",
              __func__, result));
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: returns 0\n", __func__));
   return 0;
}


#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28)
/*
 *-----------------------------------------------------------------------------
 *
 * HgfsPrepareWrite --
 *
 *    Called by the generic write path to set up a write request for a page.
 *    We're expected to do any pre-allocation and housekeeping prior to
 *    receiving the write.
 *
 * Results:
 *    On success zero, always.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsPrepareWrite(struct file *file,  // IN: File to be written
                 struct page *page,  // IN: Page to prepare
                 unsigned pageFrom,  // IN: Beginning page offset
                 unsigned pageTo)    // IN: Ending page offset
{
   /* Pre-2.6.28 path: no retry support, so the out-param is a dummy. */
   Bool dummyCanRetry = FALSE;
   return HgfsDoWriteBegin(file, page, pageFrom, pageTo, FALSE, &dummyCanRetry);
}

#else


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsWriteBegin --
 *
 *    Called by the generic write path to set up a write request for a page.
 *    We're expected to do any pre-allocation and housekeeping prior to
 *    receiving the write.
 *
 *    This function is expected to return a locked page.
 *
 * Results:
 *    Zero on success, non-zero error otherwise.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsWriteBegin(struct file *file,             // IN: File to be written
               struct address_space *mapping, // IN: Mapping
               loff_t pos,                    // IN: File position
               unsigned len,                  // IN: Bytes to be written
               unsigned flags,                // IN: Write flags
               struct page **pagePtr,         // OUT: Locked page
               void **clientData)             // OUT: Opaque to pass to write_end, unused
{
   pgoff_t index = pos >> PAGE_CACHE_SHIFT;
   unsigned pageFrom = pos & (PAGE_CACHE_SIZE - 1);
   unsigned pageTo = pageFrom + len;
   struct page *page;
   int result;
   Bool canRetry = TRUE;
   Bool doRetry;

   LOG(6, (KERN_WARNING "VMware hgfs: %s: (%s/%s(%ld), %u@%lld)\n",
           __func__, file->f_dentry->d_parent->d_name.name,
           file->f_dentry->d_name.name, mapping->host->i_ino, len,
           (long long) pos));

   /*
    * HgfsDoWriteBegin may kick off a read-modify-write (which unlocks the
    * page), in which case it asks us to retry once: re-grab the page and
    * call it again, this time with retries disabled.
    */
   do {
      doRetry = FALSE;
      page = compat_grab_cache_page_write_begin(mapping, index, flags);
      if (page == NULL) {
         result = -ENOMEM;
         goto exit;
      }
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: file size %Lu @ %Lu page %u to %u\n", __func__,
              (loff_t)compat_i_size_read(page->mapping->host),
              (loff_t)HGFS_PAGE_FILE_INDEX(page) << PAGE_CACHE_SHIFT,
              pageFrom, pageTo));

      result = HgfsDoWriteBegin(file, page, pageFrom, pageTo, canRetry, &doRetry);
      ASSERT(result == 0);
      canRetry = FALSE;
      if (doRetry) {
         page_cache_release(page);
      }
   } while (doRetry);

exit:
   /* On success the page is returned locked, as write_begin requires. */
   *pagePtr = page;
   LOG(6, (KERN_DEBUG "VMware hgfs: %s: return %d\n", __func__, result));
   return result;
}
#endif


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsDoExtendFile --
 *
 *    Helper function for extending a file size.
 *
 *    This function updates the inode->i_size, under the inode lock.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static void
HgfsDoExtendFile(struct inode *inode, // IN: File we're writing to
                 loff_t writeTo)      // IN: Offset we're written to
{
   loff_t currentFileSize;

   /* i_lock serializes the read-compare-write of i_size. */
   spin_lock(&inode->i_lock);
   currentFileSize = compat_i_size_read(inode);
   if (writeTo > currentFileSize) {
      compat_i_size_write(inode, writeTo);
   }
   spin_unlock(&inode->i_lock);
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsZeroUserSegments --
 *
 *    Zeroes up to two byte ranges of a page: [start1, end1) and
 *    [start2, end2).  Wraps zero_user_segments() on kernels that have it
 *    (>= 2.6.25) and open-codes the equivalent on older kernels.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static void
HgfsZeroUserSegments(struct page *page,    // IN: Page we're writing to
                     unsigned int start1,  // IN: segment 1 start
                     unsigned int end1,    // IN: segment 1 end
                     unsigned int start2,  // IN: segment 2 start
                     unsigned int end2)    // IN: segment 2 end
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25)
   zero_user_segments(page, start1, end1, start2, end2);
#else
   void *kaddr = compat_kmap_atomic(page);

   if (end1 > start1) {
      memset(kaddr + start1, 0, end1 - start1);
   }
   if (end2 > start2) {
      memset(kaddr + start2, 0, end2 - start2);
   }
   compat_kunmap_atomic(kaddr);
   flush_dcache_page(page);
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsZeroUserSegment --
 *
 *    Wrapper function for zeroing a single segment of a page.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static void
HgfsZeroUserSegment(struct page *page,   // IN: Page we're writing to
                    unsigned int start,  // IN: segment 1 start
                    unsigned int end)    // IN: segment 1 end
{
   /* Delegate; the second segment is empty ([0, 0)). */
   HgfsZeroUserSegments(page, start, end, 0, 0);
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsGetPageLength --
 *
 *    Helper function for finding the extent of valid file data in a page.
 *
 * Results:
 *    The page valid data length.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static unsigned int
HgfsGetPageLength(struct page *page) // IN: Page we're writing to
{
   loff_t currentFileSize;
   unsigned int pageLength = 0;

   currentFileSize = compat_i_size_read(page->mapping->host);
   if (currentFileSize > 0) {
      pgoff_t pageIndex = HGFS_PAGE_FILE_INDEX(page);
      pgoff_t fileSizeIndex = (currentFileSize - 1) >> PAGE_CACHE_SHIFT;

      if (pageIndex < fileSizeIndex) {
         /* Page lies entirely inside the file: fully valid. */
         pageLength = PAGE_CACHE_SIZE;
      } else if (pageIndex == fileSizeIndex) {
         /* Last page of the file: valid up to EOF within the page. */
         pageLength = ((currentFileSize - 1) & ~PAGE_CACHE_MASK) + 1;
      }
   }
   return pageLength;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsDoWriteEnd --
 *
 *    Helper function for HgfsWriteEnd.
 *
 *    This function updates the inode->i_size, conditionally marks the page
 *    updated and carries out the actual write in case of partial page writes.
 *
 * Results:
 *    Zero on success, non-zero on error.
 *
 * Side effects:
 *    None.
* *----------------------------------------------------------------------------- */ static int HgfsDoWriteEnd(struct file *file, // IN: File we're writing to struct page *page, // IN: Page we're writing from unsigned pageFrom, // IN: Starting page offset unsigned pageTo, // IN: Ending page offset loff_t writeTo, // IN: File position to write to unsigned copied) // IN: Number of bytes copied to the page { struct inode *inode; ASSERT(file); ASSERT(page); inode = page->mapping->host; LOG(6, (KERN_WARNING "VMware hgfs: %s: (%s/%s(%ld), from %u to %u@%lld => %u)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, page->mapping->host->i_ino, pageFrom, pageTo, (long long) writeTo, copied)); /* * Zero any uninitialised parts of the page, and then mark the page * as up to date if it turns out that we're extending the file. */ if (!PageUptodate(page)) { unsigned int pageLength = HgfsGetPageLength(page); if (pageLength == 0) { /* No file valid data in this page. Zero unwritten segments only. */ HgfsZeroUserSegments(page, 0, pageFrom, pageTo, PAGE_CACHE_SIZE); SetPageUptodate(page); } else if (pageTo >= pageLength) { /* Some file valid data in this page. Zero unwritten segments only. */ HgfsZeroUserSegment(page, pageTo, PAGE_CACHE_SIZE); if (pageTo == 0) { /* Overwritten all file valid data in this page. So the page is uptodate. */ SetPageUptodate(page); } } else { /* Overwriting part of the valid file data. */ HgfsZeroUserSegment(page, pageLength, PAGE_CACHE_SIZE); } } if (!PageUptodate(page)) { HgfsHandle handle = FILE_GET_FI_P(file)->handle; int result; /* Do a synchronous write since we have a partial page write of data. */ result = HgfsDoWritepageInt(handle, page, pageFrom, pageTo); if (result == 0) { LOG(6, (KERN_WARNING "VMware hgfs: %s: sync write return %d\n", __func__, result)); } } else { /* Page to write contains all valid data. */ set_page_dirty(page); /* * Track the pages being written. 
*/ HgfsInodePageWbAdd(inode, page); } HgfsDoExtendFile(inode, writeTo); LOG(6, (KERN_WARNING "VMware hgfs: %s: return 0\n", __func__)); return 0; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 28) /* *----------------------------------------------------------------------------- * * HgfsCommitWrite -- * * This function is the more common write path for HGFS, called from * generic_file_buffered_write. It is much simpler for us than * HgfsWritepage above: the caller has obtained a reference to the page * and will unlock it when we're done. And we don't need to worry about * properly marking the writeback bit, either. See mm/filemap.c in the * kernel for details about how we are called. * * Results: * Zero on succes, non-zero on error. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static int HgfsCommitWrite(struct file *file, // IN: File to write struct page *page, // IN: Page to write from unsigned pageFrom, // IN: Starting page offset unsigned pageTo) // IN: Ending page offset { loff_t offset; loff_t writeTo; unsigned copied; ASSERT(page); ASSERT(file); offset = (loff_t)HGFS_PAGE_FILE_INDEX(page) << PAGE_CACHE_SHIFT; writeTo = offset + pageTo; copied = pageTo - pageFrom; return HgfsDoWriteEnd(file, page, pageFrom, pageTo, writeTo, copied); } #else /* *----------------------------------------------------------------------------- * * HgfsWriteEnd -- * * This function is the more common write path for HGFS, called from * generic_file_buffered_write. It is much simpler for us than * HgfsWritepage above: write_begin has obtained a reference to the page * and we will unlock it when we're done. And we don't need to worry about * properly marking the writeback bit, either. See mm/filemap.c in the * kernel for details about how we are called. * * This function should unlock the page and reduce the refcount. 
 *
 * Results:
 *    Number of bytes written or negative error
 *
 * Side effects:
 *    Unlocks the page and drops the reference.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsWriteEnd(struct file *file,             // IN: File to write
             struct address_space *mapping, // IN: Mapping
             loff_t pos,                    // IN: File position
             unsigned len,                  // IN: len passed from write_begin
             unsigned copied,               // IN: Number of actually copied bytes
             struct page *page,             // IN: Page to write from
             void *clientData)              // IN: From write_begin, unused.
{
   unsigned pageFrom = pos & (PAGE_CACHE_SIZE - 1);
   unsigned pageTo = pageFrom + len;
   loff_t writeTo = pos + copied;
   int ret;

   ASSERT(file);
   ASSERT(mapping);
   ASSERT(page);

   LOG(6, (KERN_WARNING "VMware hgfs: %s: (%s/%s(%ld), %u@%lld,=>%u)\n",
           __func__, file->f_dentry->d_parent->d_name.name,
           file->f_dentry->d_name.name, mapping->host->i_ino, len,
           (long long) pos, copied));

   if (copied < len) {
      /* Short copy from userspace: zero the bytes that never arrived. */
      HgfsZeroUserSegment(page, pageFrom + copied, pageFrom + len);
   }

   ret = HgfsDoWriteEnd(file, page, pageFrom, pageTo, writeTo, copied);
   if (ret == 0) {
      ret = copied;
   }

   /* write_end contract: unlock and drop write_begin's page reference. */
   compat_unlock_page(page);
   page_cache_release(page);
   LOG(6, (KERN_WARNING "VMware hgfs: %s: return %d\n", __func__, ret));
   return ret;
}
#endif


/*
 *----------------------------------------------------------------------
 *
 * HgfsWbPageAlloc --
 *
 *    Allocates a write-back page object.
 *
 * Results:
 *    The write-back page object
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static inline HgfsWbPage *
HgfsWbPageAlloc(void)
{
   return kmalloc(sizeof (HgfsWbPage), GFP_KERNEL);
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsWbPageFree --
 *
 *    Frees a write-back page object.
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static inline void HgfsWbPageFree(HgfsWbPage *page) // IN: request of page data to write { ASSERT(page); kfree(page); } /* *---------------------------------------------------------------------- * * HgfsWbRequestFree -- * * Frees the resources for a write-back page request. * Calls the request destroy and then frees the object memory. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsWbRequestFree(struct kref *kref) // IN: ref field request of page data to write { HgfsWbPage *req = container_of(kref, HgfsWbPage, wb_kref); /* Release write back request page and free it. */ HgfsWbRequestDestroy(req); HgfsWbPageFree(req); } /* *---------------------------------------------------------------------- * * HgfsWbRequestGet -- * * Reference the write-back page request. * Calls the request destroy and then frees the object memory. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsWbRequestGet(HgfsWbPage *req) // IN: request of page data to write { kref_get(&req->wb_kref); } /* *---------------------------------------------------------------------- * * HgfsWbRequestPut -- * * Remove a reference the write-back page request. * Calls the request free to tear down the object memory if it was the * final one. * * Results: * None * * Side effects: * Destroys the request if last one. * *---------------------------------------------------------------------- */ void HgfsWbRequestPut(HgfsWbPage *req) // IN: request of page data to write { kref_put(&req->wb_kref, HgfsWbRequestFree); } /* *---------------------------------------------------------------------- * * HgfsWbRequestWaitUninterruptible -- * * Sleep function while waiting for requests to complete. * * Results: * Always zero. 
* * Side effects: * None * *---------------------------------------------------------------------- */ #if !defined VMW_WAITONBIT_317 && LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) && \ LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) static int HgfsWbRequestWaitUninterruptible(void *word) // IN:unused { io_schedule(); return 0; } #endif /* *---------------------------------------------------------------------- * * HgfsWbRequestWait -- * * Wait for a write-back page request to complete. * Interruptible by fatal signals only. * The user is responsible for holding a count on the request. * * Results: * Returned value will be zero if the bit was cleared, * non-zero if the process received a signal and the mode * permitted wakeup on that signal. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsWbRequestWait(HgfsWbPage *req) // IN: request of page data to write { #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) return wait_on_bit_io(&req->wb_flags, PG_BUSY, TASK_UNINTERRUPTIBLE); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) return wait_on_bit(&req->wb_flags, PG_BUSY, #if !defined VMW_WAITONBIT_317 && LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) HgfsWbRequestWaitUninterruptible, #endif TASK_UNINTERRUPTIBLE); #else wait_event(req->wb_queue, !test_bit(PG_BUSY, &req->wb_flags)); return 0; #endif } /* *---------------------------------------------------------------------- * * HgfsWbRequestLock -- * * Lock the write-back page request. * * Results: * Non-zero if the lock was not already locked * * Side effects: * None * *---------------------------------------------------------------------- */ static inline int HgfsWbRequestLock(HgfsWbPage *req) // IN: request of page data to write { return !test_and_set_bit(PG_BUSY, &req->wb_flags); } /* *---------------------------------------------------------------------- * * HgfsWbRequestUnlock -- * * Unlock the write-back page request. 
* Wakes up any waiting threads on the lock. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsWbRequestUnlock(HgfsWbPage *req) // IN: request of page data to write { if (!test_bit(PG_BUSY,&req->wb_flags)) { LOG(6, (KERN_WARNING "VMware Hgfs: %s: Invalid unlock attempted\n", __func__)); return; } HGFS_SM_MB_BEFORE(); clear_bit(PG_BUSY, &req->wb_flags); HGFS_SM_MB_AFTER(); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) wake_up_bit(&req->wb_flags, PG_BUSY); #else wake_up(&req->wb_queue); #endif } /* *---------------------------------------------------------------------- * * HgfsWbRequestUnlockAndPut -- * * Unlock the write-back page request and removes a reference. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsWbRequestUnlockAndPut(HgfsWbPage *req) // IN: request of page data to write { HgfsWbRequestUnlock(req); HgfsWbRequestPut(req); } /* *---------------------------------------------------------------------- * * HgfsWbRequestListAdd -- * * Add the write-back page request into the list. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static inline void HgfsWbRequestListAdd(HgfsWbPage *req, // IN: request of page data to write struct list_head *head) // IN: list of requests { list_add_tail(&req->wb_list, head); } /* *---------------------------------------------------------------------- * * HgfsWbRequestListRemove -- * * Remove the write-back page request from the list. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static inline void HgfsWbRequestListRemove(HgfsWbPage *req) // IN: request of page data to write { if (!list_empty(&req->wb_list)) { list_del_init(&req->wb_list); } } /* *---------------------------------------------------------------------- * * HgfsWbRequestCreate -- * * Create the write-back page request. * * Results: * The new write-back page request. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsWbPage * HgfsWbRequestCreate(struct page *page) // IN: page of data to write { HgfsWbPage *wbReq; /* try to allocate the request struct */ wbReq = HgfsWbPageAlloc(); if (wbReq == NULL) { wbReq = ERR_PTR(-ENOMEM); goto exit; } /* * Initialize the request struct. Initially, we assume a * long write-back delay. This will be adjusted in * update_nfs_request below if the region is not locked. */ wbReq->wb_flags = 0; #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 13) init_waitqueue_head(&wbReq->wb_queue); #endif INIT_LIST_HEAD(&wbReq->wb_list); wbReq->wb_page = page; wbReq->wb_index = HGFS_PAGE_FILE_INDEX(page); page_cache_get(page); kref_init(&wbReq->wb_kref); exit: LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p)\n", __func__, wbReq, page)); return wbReq; } /* *---------------------------------------------------------------------- * * HgfsWbRequestDestroy -- * * Destroys by freeing up all resources allocated to the request. * Release page associated with a write-back request after it has completed. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsWbRequestDestroy(HgfsWbPage *req) // IN: write page request { struct page *page = req->wb_page; LOG(6, (KERN_WARNING"VMware hgfs: %s: (%p, %p)\n", __func__, req, req->wb_page)); if (page != NULL) { page_cache_release(page); req->wb_page = NULL; } } /* *---------------------------------------------------------------------- * * HgfsInodeFindWbRequest -- * * Finds if there is a write-back page request on this inode and returns it. * * Results: * NULL or the write-back request for the page. * * Side effects: * None * *---------------------------------------------------------------------- */ static HgfsWbPage * HgfsInodeFindWbRequest(struct inode *inode, // IN: inode of file to write to struct page *page) // IN: page of data to write { HgfsInodeInfo *iinfo; HgfsWbPage *req = NULL; HgfsWbPage *cur; iinfo = INODE_GET_II_P(inode); /* Linearly search the write back list for the correct req */ list_for_each_entry(cur, &iinfo->listWbPages, wb_list) { if (cur->wb_page == page) { req = cur; break; } } if (req != NULL) { HgfsWbRequestGet(req); } return req; } /* *---------------------------------------------------------------------- * * HgfsInodeFindExistingWbRequest -- * * Finds if there is a write-back page request on this inode and returns * locked. * If the request is busy (locked) then it drops the lock and waits for it * be not locked and searches the list again. * * Results: * NULL or the write-back request for the page. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static HgfsWbPage * HgfsInodeFindExistingWbRequest(struct inode *inode, // IN: inode of file to write to struct page *page) // IN: page of data to write { HgfsWbPage *req; int error; spin_lock(&inode->i_lock); for (;;) { req = HgfsInodeFindWbRequest(inode, page); if (req == NULL) { goto out_exit; } /* * Try and lock the request if not already locked. * If we find it is already locked, busy, then we drop * the reference and wait to try again. Otherwise, * once newly locked we break out and return to the caller. */ if (HgfsWbRequestLock(req)) { break; } /* The request was in use, so wait and then retry */ spin_unlock(&inode->i_lock); error = HgfsWbRequestWait(req); HgfsWbRequestPut(req); if (error != 0) { goto out_nolock; } spin_lock(&inode->i_lock); } out_exit: spin_unlock(&inode->i_lock); return req; out_nolock: return ERR_PTR(error); } /* *---------------------------------------------------------------------- * * HgfsInodeAddWbRequest -- * * Add a write-back page request to an inode. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsInodeAddWbRequest(struct inode *inode, // IN: inode of file to write to HgfsWbPage *req) // IN: page write request { HgfsInodeInfo *iinfo = INODE_GET_II_P(inode); LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p, %lu)\n", __func__, inode, req->wb_page, iinfo->numWbPages)); /* Lock the request! */ HgfsWbRequestLock(req); HgfsWbRequestListAdd(req, &iinfo->listWbPages); iinfo->numWbPages++; HgfsWbRequestGet(req); } /* *---------------------------------------------------------------------- * * HgfsInodeRemoveWbRequest -- * * Remove a write-back page request from an inode. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsInodeRemoveWbRequest(struct inode *inode, // IN: inode of file written to HgfsWbPage *req) // IN: page write request { HgfsInodeInfo *iinfo = INODE_GET_II_P(inode); LOG(6, (KERN_CRIT "VMware hgfs: %s: (%p, %p, %lu)\n", __func__, inode, req->wb_page, iinfo->numWbPages)); iinfo->numWbPages--; HgfsWbRequestListRemove(req); HgfsWbRequestPut(req); } /* *---------------------------------------------------------------------- * * HgfsInodePageWbAdd -- * * Add a write-back page request to an inode. * If the page is already exists in the list for this inode nothing is * done, otherwise a new object is created for the page and added to the * inode list. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsInodePageWbAdd(struct inode *inode, // IN: inode of file to write to struct page *page) // IN: page of data to write { HgfsWbPage *req; LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p)\n", __func__, inode, page)); req = HgfsInodeFindExistingWbRequest(inode, page); if (req != NULL) { goto exit; } /* * We didn't find an existing write back request for that page so * we create one. */ req = HgfsWbRequestCreate(page); if (IS_ERR(req)) { goto exit; } spin_lock(&inode->i_lock); /* * Add the new write request for the page into our inode list to track. */ HgfsInodeAddWbRequest(inode, req); spin_unlock(&inode->i_lock); exit: if (!IS_ERR(req)) { HgfsWbRequestUnlockAndPut(req); } } /* *---------------------------------------------------------------------- * * HgfsInodePageWbRemove -- * * Remove a write-back page request from an inode. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsInodePageWbRemove(struct inode *inode, // IN: inode of file written to struct page *page) // IN: page of data written { HgfsWbPage *req; LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p)\n", __func__, inode, page)); req = HgfsInodeFindExistingWbRequest(inode, page); if (req == NULL) { goto exit; } spin_lock(&inode->i_lock); /* * Add the new write request for the page into our inode list to track. */ HgfsInodeRemoveWbRequest(inode, req); HgfsWbRequestUnlockAndPut(req); spin_unlock(&inode->i_lock); exit: return; } /* *---------------------------------------------------------------------- * * HgfsInodePageWbFind -- * * Find a write-back page request from an inode. * * Results: * TRUE if found an existing write for the page, FALSE otherwise. * * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsInodePageWbFind(struct inode *inode, // IN: inode of file written to struct page *page) // IN: page of data written { HgfsWbPage *req; Bool found = TRUE; LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p)\n", __func__, inode, page)); req = HgfsInodeFindExistingWbRequest(inode, page); if (req == NULL) { found = FALSE; goto exit; } spin_lock(&inode->i_lock); /* * Remove the write request lock and reference we just grabbed. */ HgfsWbRequestUnlockAndPut(req); spin_unlock(&inode->i_lock); exit: LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %p) return %d\n", __func__, inode, page, found)); return found; } /* *---------------------------------------------------------------------- * * HgfsCheckReadModifyWrite -- * * Check if we can read the page from the server to get the valid data * for a page that we are in process of partially modifying and then * writing. 
* * We maybe required to read the page first if the file is open for * reading in addition to writing, the page is not marked as uptodate, * it is not dirty or waiting to be committed, indicating that it was * previously allocated and then modified, that there were valid bytes * of data in that range of the file, and that the new data won't completely * replace the old data in that range of the file. * * Results: * TRUE if we need to read valid data and can do so for the page, * FALSE otherwise. * * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsCheckReadModifyWrite(struct file *file, // IN: File to be written struct page *page, // IN: page of data written unsigned int pageFrom, // IN: position unsigned int pageTo) // IN: len { unsigned int pageLength = HgfsGetPageLength(page); struct inode *inode = page->mapping->host; Bool readPage = FALSE; LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %u, %u)\n", __func__, page, pageFrom, pageTo)); if ((file->f_mode & FMODE_READ) && // opened for read? !HgfsInodePageWbFind(inode, page) && // I/O request already ? !PageUptodate(page) && // Up to date? pageLength > 0 && // valid bytes of file? (pageTo < pageLength || pageFrom != 0)) { // replace all valid bytes? readPage = TRUE; } LOG(6, (KERN_WARNING "VMware hgfs: %s: (%p, %u, %u) return %d\n", __func__, page, pageFrom, pageTo, readPage)); return readPage; } vmhgfs-only/transport.h 0000444 0000000 0000000 00000005025 13432725306 014237 0 ustar root root /********************************************************* * Copyright (C) 2009-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * transport.h -- */ #ifndef _HGFS_DRIVER_TRANSPORT_H_ #define _HGFS_DRIVER_TRANSPORT_H_ #include "request.h" #include "compat_mutex.h" #include "hgfsProto.h" /* * There are the operations a channel should implement. */ struct HgfsTransportChannel; typedef struct HgfsTransportChannelOps { Bool (*open)(struct HgfsTransportChannel *); void (*close)(struct HgfsTransportChannel *); HgfsReq* (*allocate)(size_t payloadSize); int (*send)(struct HgfsTransportChannel *, HgfsReq *); void (*free)(HgfsReq *); } HgfsTransportChannelOps; typedef enum { HGFS_CHANNEL_UNINITIALIZED, HGFS_CHANNEL_NOTCONNECTED, HGFS_CHANNEL_CONNECTED, HGFS_CHANNEL_DEAD, /* Error has been detected, need to shut it down. */ } HgfsChannelStatus; typedef struct HgfsTransportChannel { const char *name; /* Channel name. */ HgfsTransportChannelOps ops; /* Channel ops. */ HgfsChannelStatus status; /* Connection status. */ void *priv; /* Channel private data. */ compat_mutex_t connLock; /* Protect _this_ struct. */ } HgfsTransportChannel; /* Public functions (with respect to the entire module). 
*/ void HgfsTransportInit(void); void HgfsTransportExit(void); HgfsReq *HgfsTransportAllocateRequest(size_t payloadSize); void HgfsTransportFreeRequest(HgfsReq *req); int HgfsTransportSendRequest(HgfsReq *req); HgfsReq *HgfsTransportGetPendingRequest(HgfsHandle id); void HgfsTransportRemovePendingRequest(HgfsReq *req); void HgfsTransportFinishRequest(HgfsReq *req, Bool success, Bool do_put); void HgfsTransportFlushRequests(void); void HgfsTransportMarkDead(void); HgfsTransportChannel *HgfsGetBdChannel(void); #endif // _HGFS_DRIVER_TRANSPORT_H_ vmhgfs-only/shared/ 0000755 0000000 0000000 00000000000 13432726375 013307 5 ustar root root vmhgfs-only/shared/compat_statfs.h 0000444 0000000 0000000 00000002306 13432725347 016324 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_STATFS_H__ # define __COMPAT_STATFS_H__ /* vfs.h simply include statfs.h, but it knows what directory statfs.h is in. */ #include <linux/vfs.h> /* 2.5.74 renamed struct statfs to kstatfs. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 74) #define compat_kstatfs kstatfs #else #define compat_kstatfs statfs #endif #endif /* __COMPAT_STATFS_H__ */ vmhgfs-only/shared/vmciKernelAPI.h 0000444 0000000 0000000 00000002451 13432725350 016101 0 ustar root root /********************************************************* * Copyright (C) 2010,2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciKernelAPI.h -- * * Kernel API (current) exported from the VMCI host and guest drivers. */ #ifndef __VMCI_KERNELAPI_H__ #define __VMCI_KERNELAPI_H__ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" /* With this file you always get the latest version. */ #include "vmciKernelAPI1.h" #include "vmciKernelAPI2.h" #include "vmciKernelAPI3.h" #endif /* !__VMCI_KERNELAPI_H__ */ vmhgfs-only/shared/community_source.h 0000444 0000000 0000000 00000003712 13432725350 017055 0 ustar root root /********************************************************* * Copyright (C) 2009-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * community_source.h -- * * Macros for excluding source code from community. */ #ifndef _COMMUNITY_SOURCE_H_ #define _COMMUNITY_SOURCE_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" /* * Convenience macro for COMMUNITY_SOURCE */ #undef EXCLUDE_COMMUNITY_SOURCE #ifdef COMMUNITY_SOURCE #define EXCLUDE_COMMUNITY_SOURCE(x) #else #define EXCLUDE_COMMUNITY_SOURCE(x) x #endif #undef COMMUNITY_SOURCE_AMD_SECRET #if !defined(COMMUNITY_SOURCE) || defined(AMD_SOURCE) /* * It's ok to include AMD_SECRET source code for non-Community Source, * or for drops directed at AMD. */ #define COMMUNITY_SOURCE_AMD_SECRET #endif #undef COMMUNITY_SOURCE_INTEL_SECRET #if !defined(COMMUNITY_SOURCE) || defined(INTEL_SOURCE) /* * It's ok to include INTEL_SECRET source code for non-Community Source, * or for drops directed at Intel. */ #define COMMUNITY_SOURCE_INTEL_SECRET #endif #endif vmhgfs-only/shared/vmci_iocontrols.h 0000444 0000000 0000000 00000062156 13432725350 016671 0 ustar root root /********************************************************* * Copyright (C) 2007-2014,2018 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_iocontrols.h * * The VMCI driver io controls. */ #ifndef _VMCI_IOCONTROLS_H_ #define _VMCI_IOCONTROLS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vm_assert.h" #include "vmci_defs.h" #if defined(_WIN32) && defined(WINNT_DDK) /* We need to expose the API through an IOCTL on Windows. Use latest API. */ #include "vmciKernelAPI.h" #endif // _WIN32 && WINNT_DDK #if defined __cplusplus extern "C" { #endif /* *----------------------------------------------------------------------------- * * VMCIVA64ToPtr -- * * Convert a VA64 to a pointer. * * Results: * Virtual address. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void * VMCIVA64ToPtr(VA64 va64) // IN { #ifdef VM_64BIT ASSERT_ON_COMPILE(sizeof (void *) == 8); #else ASSERT_ON_COMPILE(sizeof (void *) == 4); // Check that nothing of value will be lost. ASSERT(!(va64 >> 32)); #endif return (void *)(uintptr_t)va64; } /* *----------------------------------------------------------------------------- * * VMCIPtrToVA64 -- * * Convert a pointer to a VA64. * * Results: * Virtual address. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE VA64 VMCIPtrToVA64(void const *ptr) // IN { ASSERT_ON_COMPILE(sizeof ptr <= sizeof (VA64)); return (VA64)(uintptr_t)ptr; } /* * Driver version. * * Increment major version when you make an incompatible change. * Compatibility goes both ways (old driver with new executable * as well as new driver with old executable). */ #define VMCI_VERSION_SHIFT_WIDTH 16 /* Never change this. */ #define VMCI_MAKE_VERSION(_major, _minor) ((_major) << \ VMCI_VERSION_SHIFT_WIDTH | \ (uint16) (_minor)) #define VMCI_VERSION_MAJOR(v) ((uint32) (v) >> VMCI_VERSION_SHIFT_WIDTH) #define VMCI_VERSION_MINOR(v) ((uint16) (v)) /* * VMCI_VERSION is always the current version. Subsequently listed * versions are ways of detecting previous versions of the connecting * application (i.e., VMX). * * VMCI_VERSION_NOVMVM: This version removed support for VM to VM * communication. * * VMCI_VERSION_NOTIFY: This version introduced doorbell notification * support. * * VMCI_VERSION_HOSTQP: This version introduced host end point support * for hosted products. * * VMCI_VERSION_PREHOSTQP: This is the version prior to the adoption of * support for host end-points. * * VMCI_VERSION_PREVERS2: This fictional version number is intended to * represent the version of a VMX which doesn't call into the driver * with ioctl VERSION2 and thus doesn't establish its version with the * driver. */ #define VMCI_VERSION VMCI_VERSION_NOVMVM #define VMCI_VERSION_NOVMVM VMCI_MAKE_VERSION(11, 0) #define VMCI_VERSION_NOTIFY VMCI_MAKE_VERSION(10, 0) #define VMCI_VERSION_HOSTQP VMCI_MAKE_VERSION(9, 0) #define VMCI_VERSION_PREHOSTQP VMCI_MAKE_VERSION(8, 0) #define VMCI_VERSION_PREVERS2 VMCI_MAKE_VERSION(1, 0) /* * VMCISockets driver version. The version is platform-dependent and is * embedded in vsock_version.h for each platform. It can be obtained via * VMCISock_Version() (which uses IOCTL_VMCI_SOCKETS_VERSION). 
The * following is simply for constructing an unsigned integer value from the * comma-separated version in the header. This must match the macros defined * in vmci_sockets.h. An example of using this is: * uint16 parts[4] = { VSOCK_DRIVER_VERSION_COMMAS }; * uint32 version = VMCI_SOCKETS_MAKE_VERSION(parts); */ #define VMCI_SOCKETS_MAKE_VERSION(_p) \ ((((_p)[0] & 0xFF) << 24) | (((_p)[1] & 0xFF) << 16) | ((_p)[2])) #if defined(__linux__) || defined(VMKERNEL) /* * Linux defines _IO* macros, but the core kernel code ignore the encoded * ioctl value. It is up to individual drivers to decode the value (for * example to look at the size of a structure to determine which version * of a specific command should be used) or not (which is what we * currently do, so right now the ioctl value for a given command is the * command itself). * * Hence, we just define the IOCTL_VMCI_foo values directly, with no * intermediate IOCTLCMD_ representation. */ # define IOCTLCMD(_cmd) IOCTL_VMCI_ ## _cmd #elif defined (__APPLE__) #include <sys/ioccom.h> #define IOCTLCMD(_cmd) IOCTL_VMCI_ ## _cmd #define IOCTLCMD_I(_cmd, _type) \ IOCTL_VMCI_MACOS_ ## _cmd = _IOW('V', IOCTL_VMCI_ ## _cmd, _type) #define IOCTLCMD_O(_cmd, _type) \ IOCTL_VMCI_MACOS_ ## _cmd = _IOR('V', IOCTL_VMCI_ ## _cmd, _type) #define IOCTLCMD_IO(_cmd, _type) \ IOCTL_VMCI_MACOS_ ## _cmd = _IOWR('V', IOCTL_VMCI_ ## _cmd, _type) #else // if defined(__linux__) /* * On platforms other than Linux, IOCTLCMD_foo values are just numbers, and * we build the IOCTL_VMCI_foo values around these using platform-specific * format for encoding arguments and sizes. */ # define IOCTLCMD(_cmd) IOCTLCMD_VMCI_ ## _cmd #endif enum IOCTLCmd_VMCI { /* * We need to bracket the range of values used for ioctls, because x86_64 * Linux forces us to explicitly register ioctl handlers by value for * handling 32 bit ioctl syscalls. Hence FIRST and LAST. Pick something * for FIRST that doesn't collide with vmmon (2001+). 
*/ #if defined(__linux__) IOCTLCMD(FIRST) = 1951, #else /* Start at 0. */ IOCTLCMD(FIRST), #endif IOCTLCMD(VERSION) = IOCTLCMD(FIRST), /* BEGIN VMCI */ IOCTLCMD(INIT_CONTEXT), /* * The following two were used for process and datagram process creation. * They are not used anymore and reserved for future use. * They will fail if issued. */ IOCTLCMD(RESERVED1), IOCTLCMD(RESERVED2), /* * The following used to be for shared memory. It is now unused and and is * reserved for future use. It will fail if issued. */ IOCTLCMD(RESERVED3), /* * The follwoing three were also used to be for shared memory. An * old WS6 user-mode client might try to use them with the new * driver, but since we ensure that only contexts created by VMX'en * of the appropriate version (VMCI_VERSION_NOTIFY or * VMCI_VERSION_NEWQP) or higher use these ioctl, everything is * fine. */ IOCTLCMD(QUEUEPAIR_SETVA), IOCTLCMD(NOTIFY_RESOURCE), IOCTLCMD(NOTIFICATIONS_RECEIVE), IOCTLCMD(VERSION2), IOCTLCMD(QUEUEPAIR_ALLOC), IOCTLCMD(QUEUEPAIR_SETPAGEFILE), IOCTLCMD(QUEUEPAIR_DETACH), IOCTLCMD(DATAGRAM_SEND), IOCTLCMD(DATAGRAM_RECEIVE), IOCTLCMD(DATAGRAM_REQUEST_MAP), IOCTLCMD(DATAGRAM_REMOVE_MAP), IOCTLCMD(CTX_ADD_NOTIFICATION), IOCTLCMD(CTX_REMOVE_NOTIFICATION), IOCTLCMD(CTX_GET_CPT_STATE), IOCTLCMD(CTX_SET_CPT_STATE), IOCTLCMD(GET_CONTEXT_ID), /* END VMCI */ /* * BEGIN VMCI SOCKETS * * We mark the end of the vmci commands and the start of the vmci sockets * commands since they are used in separate modules on Linux. * */ IOCTLCMD(LAST), IOCTLCMD(SOCKETS_FIRST) = IOCTLCMD(LAST), /* * This used to be for accept() on Windows and Mac OS, which is now * redundant (since we now use real handles). It is used instead for * getting the version. This value is now public, so it cannot change. */ IOCTLCMD(SOCKETS_VERSION) = IOCTLCMD(SOCKETS_FIRST), IOCTLCMD(SOCKETS_BIND), /* * This used to be for close() on Windows and Mac OS, but is no longer * used for the same reason as accept() above. 
It is used instead for * sending private symbols to the Mac OS driver. */ IOCTLCMD(SOCKETS_SET_SYMBOLS), IOCTLCMD(SOCKETS_CONNECT), /* * The next two values are public (vmci_sockets.h) and cannot be changed. * That means the number of values above these cannot be changed either * unless the base index (specified below) is updated accordingly. */ IOCTLCMD(SOCKETS_GET_AF_VALUE), IOCTLCMD(SOCKETS_GET_LOCAL_CID), IOCTLCMD(SOCKETS_GET_SOCK_NAME), IOCTLCMD(SOCKETS_GET_SOCK_OPT), IOCTLCMD(SOCKETS_GET_VM_BY_NAME), IOCTLCMD(SOCKETS_IOCTL), IOCTLCMD(SOCKETS_LISTEN), IOCTLCMD(SOCKETS_RECV), IOCTLCMD(SOCKETS_RECV_FROM), IOCTLCMD(SOCKETS_SELECT), IOCTLCMD(SOCKETS_SEND), IOCTLCMD(SOCKETS_SEND_TO), IOCTLCMD(SOCKETS_SET_SOCK_OPT), IOCTLCMD(SOCKETS_SHUTDOWN), IOCTLCMD(SOCKETS_SOCKET), IOCTLCMD(SOCKETS_UUID_2_CID), /* 1991 on Linux. */ /* END VMCI SOCKETS */ /* * We reserve a range of 3 ioctls for VMCI Sockets to grow. We cannot * reserve many ioctls here since we are close to overlapping with vmmon * ioctls. Define a meta-ioctl if running out of this binary space. */ // Must be last. IOCTLCMD(SOCKETS_LAST) = IOCTLCMD(SOCKETS_UUID_2_CID) + 3, /* 1994 on Linux. */ /* * The VSockets ioctls occupy the block above. We define a new range of * VMCI ioctls to maintain binary compatibility between the user land and * the kernel driver. Careful, vmmon ioctls start from 2001, so this means * we can add only 4 new VMCI ioctls. Define a meta-ioctl if running out of * this binary space. */ IOCTLCMD(FIRST2), IOCTLCMD(SET_NOTIFY) = IOCTLCMD(FIRST2), /* 1995 on Linux. */ IOCTLCMD(LAST2), }; #if defined (__APPLE__) /* * The size of this must match the size of VSockIoctlPrivSyms in * modules/vsock/common/vsockIoctl.h. 
*/ #pragma pack(push, 1) struct IOCTLCmd_VMCIMacOS_PrivSyms { char data[344]; }; #pragma pack(pop) enum IOCTLCmd_VMCIMacOS { IOCTLCMD_I(SOCKETS_SET_SYMBOLS, struct IOCTLCmd_VMCIMacOS_PrivSyms), IOCTLCMD_O(SOCKETS_VERSION, unsigned int), IOCTLCMD_O(SOCKETS_GET_AF_VALUE, int), IOCTLCMD_O(SOCKETS_GET_LOCAL_CID, unsigned int), }; #endif // __APPLE__ #if defined _WIN32 /* * Windows VMCI ioctl definitions. */ /* PUBLIC: For VMCISockets user-mode clients that use CreateFile(). */ #define VMCI_INTERFACE_VSOCK_PUBLIC_NAME TEXT("\\\\.\\VMCI") /* PUBLIC: For VMCISockets user-mode clients that use NtCreateFile(). */ #define VMCI_INTERFACE_VSOCK_PUBLIC_NAME_NT L"\\??\\VMCI" /* PUBLIC: For the VMX, which uses CreateFile(). */ #define VMCI_INTERFACE_VMX_PUBLIC_NAME TEXT("\\\\.\\VMCIDev\\VMX") /* PRIVATE NAMES */ #define VMCI_DEVICE_VMCI_LINK_PATH L"\\DosDevices\\VMCIDev" #define VMCI_DEVICE_VSOCK_LINK_PATH L"\\DosDevices\\vmci" #define VMCI_DEVICE_HOST_NAME_PATH L"\\Device\\VMCIHostDev" #define VMCI_DEVICE_GUEST_NAME_PATH L"\\Device\\VMCIGuestDev" /* PRIVATE NAMES */ /* These values cannot be changed since some of the ioctl values are public. 
*/ #define FILE_DEVICE_VMCI 0x8103 #define VMCI_IOCTL_BASE_INDEX 0x801 #define VMCIIOCTL_BUFFERED(name) \ CTL_CODE(FILE_DEVICE_VMCI, \ VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \ METHOD_BUFFERED, \ FILE_ANY_ACCESS) #define VMCIIOCTL_NEITHER(name) \ CTL_CODE(FILE_DEVICE_VMCI, \ VMCI_IOCTL_BASE_INDEX + IOCTLCMD_VMCI_ ## name, \ METHOD_NEITHER, \ FILE_ANY_ACCESS) enum IOCTLCmd_VMCIWin32 { IOCTLCMD(DEVICE_GET) = IOCTLCMD(LAST2) + 1, IOCTLCMD(SOCKETS_SERVICE_GET), IOCTLCMD(SOCKETS_STOP), }; #define IOCTL_VMCI_VERSION VMCIIOCTL_BUFFERED(VERSION) /* BEGIN VMCI */ #define IOCTL_VMCI_INIT_CONTEXT \ VMCIIOCTL_BUFFERED(INIT_CONTEXT) #define IOCTL_VMCI_HYPERCALL \ VMCIIOCTL_BUFFERED(HYPERCALL) #define IOCTL_VMCI_CREATE_DATAGRAM_HANDLE \ VMCIIOCTL_BUFFERED(CREATE_DATAGRAM_HANDLE) #define IOCTL_VMCI_DESTROY_DATAGRAM_HANDLE \ VMCIIOCTL_BUFFERED(DESTROY_DATAGRAM_HANDLE) #define IOCTL_VMCI_NOTIFY_RESOURCE \ VMCIIOCTL_BUFFERED(NOTIFY_RESOURCE) #define IOCTL_VMCI_NOTIFICATIONS_RECEIVE \ VMCIIOCTL_BUFFERED(NOTIFICATIONS_RECEIVE) #define IOCTL_VMCI_VERSION2 \ VMCIIOCTL_BUFFERED(VERSION2) #define IOCTL_VMCI_QUEUEPAIR_ALLOC \ VMCIIOCTL_BUFFERED(QUEUEPAIR_ALLOC) #define IOCTL_VMCI_QUEUEPAIR_SETVA \ VMCIIOCTL_BUFFERED(QUEUEPAIR_SETVA) #define IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE \ VMCIIOCTL_BUFFERED(QUEUEPAIR_SETPAGEFILE) #define IOCTL_VMCI_QUEUEPAIR_DETACH \ VMCIIOCTL_BUFFERED(QUEUEPAIR_DETACH) #define IOCTL_VMCI_DATAGRAM_SEND \ VMCIIOCTL_BUFFERED(DATAGRAM_SEND) #define IOCTL_VMCI_DATAGRAM_RECEIVE \ VMCIIOCTL_NEITHER(DATAGRAM_RECEIVE) #define IOCTL_VMCI_DATAGRAM_REQUEST_MAP \ VMCIIOCTL_BUFFERED(DATAGRAM_REQUEST_MAP) #define IOCTL_VMCI_DATAGRAM_REMOVE_MAP \ VMCIIOCTL_BUFFERED(DATAGRAM_REMOVE_MAP) #define IOCTL_VMCI_CTX_ADD_NOTIFICATION \ VMCIIOCTL_BUFFERED(CTX_ADD_NOTIFICATION) #define IOCTL_VMCI_CTX_REMOVE_NOTIFICATION \ VMCIIOCTL_BUFFERED(CTX_REMOVE_NOTIFICATION) #define IOCTL_VMCI_CTX_GET_CPT_STATE \ VMCIIOCTL_BUFFERED(CTX_GET_CPT_STATE) #define IOCTL_VMCI_CTX_SET_CPT_STATE \ 
VMCIIOCTL_BUFFERED(CTX_SET_CPT_STATE) #define IOCTL_VMCI_GET_CONTEXT_ID \ VMCIIOCTL_BUFFERED(GET_CONTEXT_ID) #define IOCTL_VMCI_DEVICE_GET \ VMCIIOCTL_BUFFERED(DEVICE_GET) /* END VMCI */ /* BEGIN VMCI SOCKETS */ #define IOCTL_VMCI_SOCKETS_VERSION \ VMCIIOCTL_BUFFERED(SOCKETS_VERSION) #define IOCTL_VMCI_SOCKETS_BIND \ VMCIIOCTL_BUFFERED(SOCKETS_BIND) #define IOCTL_VMCI_SOCKETS_CONNECT \ VMCIIOCTL_BUFFERED(SOCKETS_CONNECT) #define IOCTL_VMCI_SOCKETS_GET_AF_VALUE \ VMCIIOCTL_BUFFERED(SOCKETS_GET_AF_VALUE) #define IOCTL_VMCI_SOCKETS_GET_LOCAL_CID \ VMCIIOCTL_BUFFERED(SOCKETS_GET_LOCAL_CID) #define IOCTL_VMCI_SOCKETS_GET_SOCK_NAME \ VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_NAME) #define IOCTL_VMCI_SOCKETS_GET_SOCK_OPT \ VMCIIOCTL_BUFFERED(SOCKETS_GET_SOCK_OPT) #define IOCTL_VMCI_SOCKETS_GET_VM_BY_NAME \ VMCIIOCTL_BUFFERED(SOCKETS_GET_VM_BY_NAME) #define IOCTL_VMCI_SOCKETS_IOCTL \ VMCIIOCTL_BUFFERED(SOCKETS_IOCTL) #define IOCTL_VMCI_SOCKETS_LISTEN \ VMCIIOCTL_BUFFERED(SOCKETS_LISTEN) #define IOCTL_VMCI_SOCKETS_RECV_FROM \ VMCIIOCTL_BUFFERED(SOCKETS_RECV_FROM) #define IOCTL_VMCI_SOCKETS_SELECT \ VMCIIOCTL_BUFFERED(SOCKETS_SELECT) #define IOCTL_VMCI_SOCKETS_SEND_TO \ VMCIIOCTL_BUFFERED(SOCKETS_SEND_TO) #define IOCTL_VMCI_SOCKETS_SET_SOCK_OPT \ VMCIIOCTL_BUFFERED(SOCKETS_SET_SOCK_OPT) #define IOCTL_VMCI_SOCKETS_SHUTDOWN \ VMCIIOCTL_BUFFERED(SOCKETS_SHUTDOWN) #define IOCTL_VMCI_SOCKETS_SERVICE_GET \ VMCIIOCTL_BUFFERED(SOCKETS_SERVICE_GET) #define IOCTL_VMCI_SOCKETS_STOP \ VMCIIOCTL_NEITHER(SOCKETS_STOP) /* END VMCI SOCKETS */ #endif // _WIN32 /* * VMCI driver initialization. This block can also be used to * pass initial group membership etc. */ typedef struct VMCIInitBlock { VMCIId cid; VMCIPrivilegeFlags flags; #ifdef _WIN32 uint64 event; /* Handle for signalling vmci calls on windows. */ #endif // _WIN32 } VMCIInitBlock; typedef struct VMCISharedMemInfo { VMCIHandle handle; uint32 size; uint32 result; VA64 va; /* Currently only used in the guest. 
*/ char pageFileName[VMCI_PATH_MAX]; } VMCISharedMemInfo; typedef struct VMCIQueuePairAllocInfo_VMToVM { VMCIHandle handle; VMCIId peer; uint32 flags; uint64 produceSize; uint64 consumeSize; #if !defined(VMX86_SERVER) && !defined(VMKERNEL) VA64 producePageFile; /* User VA. */ VA64 consumePageFile; /* User VA. */ uint64 producePageFileSize; /* Size of the file name array. */ uint64 consumePageFileSize; /* Size of the file name array. */ #else PPN64 * PPNs; uint64 numPPNs; #endif int32 result; uint32 _pad; } VMCIQueuePairAllocInfo_VMToVM; typedef struct VMCIQueuePairAllocInfo { VMCIHandle handle; VMCIId peer; uint32 flags; uint64 produceSize; uint64 consumeSize; #if !defined(VMX86_SERVER) && !defined(VMKERNEL) VA64 ppnVA; /* Start VA of queue pair PPNs. */ #else PPN64 * PPNs; #endif uint64 numPPNs; int32 result; uint32 version; } VMCIQueuePairAllocInfo; typedef struct VMCIQueuePairSetVAInfo { VMCIHandle handle; VA64 va; /* Start VA of queue pair PPNs. */ uint64 numPPNs; uint32 version; int32 result; } VMCIQueuePairSetVAInfo; /* * For backwards compatibility, here is a version of the * VMCIQueuePairPageFileInfo before host support end-points was added. * Note that the current version of that structure requires VMX to * pass down the VA of the mapped file. Before host support was added * there was nothing of the sort. So, when the driver sees the ioctl * with a parameter that is the sizeof * VMCIQueuePairPageFileInfo_NoHostQP then it can infer that the version * of VMX running can't attach to host end points because it doesn't * provide the VA of the mapped files. * * The Linux driver doesn't get an indication of the size of the * structure passed down from user space. So, to fix a long standing * but unfiled bug, the _pad field has been renamed to version. * Existing versions of VMX always initialize the PageFileInfo * structure so that _pad, er, version is set to 0. 
* * A version value of 1 indicates that the size of the structure has * been increased to include two UVA's: produceUVA and consumeUVA. * These UVA's are of the mmap()'d queue contents backing files. * * In addition, if when VMX is sending down the * VMCIQueuePairPageFileInfo structure it gets an error then it will * try again with the _NoHostQP version of the file to see if an older * VMCI kernel module is running. */ typedef struct VMCIQueuePairPageFileInfo_NoHostQP { VMCIHandle handle; VA64 producePageFile; /* User VA. */ VA64 consumePageFile; /* User VA. */ uint64 producePageFileSize; /* Size of the file name array. */ uint64 consumePageFileSize; /* Size of the file name array. */ int32 result; uint32 version; /* Was _pad. Must be 0. */ } VMCIQueuePairPageFileInfo_NoHostQP; typedef struct VMCIQueuePairPageFileInfo { VMCIHandle handle; #if !defined(VMX86_SERVER) && !defined(VMKERNEL) VA64 producePageFile; /* User VA. */ VA64 consumePageFile; /* User VA. */ uint64 producePageFileSize; /* Size of the file name array. */ uint64 consumePageFileSize; /* Size of the file name array. */ #endif int32 result; uint32 version; /* Was _pad. */ VA64 produceVA; /* User VA of the mapped file. */ VA64 consumeVA; /* User VA of the mapped file. */ } VMCIQueuePairPageFileInfo; typedef struct VMCIQueuePairDetachInfo { VMCIHandle handle; int32 result; uint32 _pad; } VMCIQueuePairDetachInfo; typedef struct VMCIDatagramSendRecvInfo { VA64 addr; uint32 len; int32 result; } VMCIDatagramSendRecvInfo; /* Used to add/remove well-known datagram mappings. */ typedef struct VMCIDatagramMapInfo { VMCIId wellKnownID; int result; } VMCIDatagramMapInfo; /* Used to add/remove remote context notifications. */ typedef struct VMCINotifyAddRemoveInfo { VMCIId remoteCID; int result; } VMCINotifyAddRemoveInfo; /* Used to set/get current context's checkpoint state. 
*/ typedef struct VMCICptBufInfo { VA64 cptBuf; uint32 cptType; uint32 bufSize; int32 result; uint32 _pad; } VMCICptBufInfo; /* Used to pass notify flag's address to the host driver. */ typedef struct VMCISetNotifyInfo { VA64 notifyUVA; int32 result; uint32 _pad; } VMCISetNotifyInfo; #define VMCI_NOTIFY_RESOURCE_QUEUE_PAIR 0 #define VMCI_NOTIFY_RESOURCE_DOOR_BELL 1 #define VMCI_NOTIFY_RESOURCE_ACTION_NOTIFY 0 #define VMCI_NOTIFY_RESOURCE_ACTION_CREATE 1 #define VMCI_NOTIFY_RESOURCE_ACTION_DESTROY 2 /* * Used to create and destroy doorbells, and generate a notification * for a doorbell or queue pair. */ typedef struct VMCINotifyResourceInfo { VMCIHandle handle; uint16 resource; uint16 action; int32 result; } VMCINotifyResourceInfo; /* * Used to recieve pending notifications for doorbells and queue * pairs. */ typedef struct VMCINotificationReceiveInfo { VA64 dbHandleBufUVA; uint64 dbHandleBufSize; VA64 qpHandleBufUVA; uint64 qpHandleBufSize; int32 result; uint32 _pad; } VMCINotificationReceiveInfo; #if defined(_WIN32) && defined(WINNT_DDK) /* * Used on Windows to expose the API calls that are no longer exported. This * is kernel-mode only, and both sides will have the same bitness, so we can * use pointers directly. */ /* Version 1. 
*/ typedef struct VMCIDeviceGetInfoVer1 { VMCI_DeviceReleaseFct *deviceRelease; VMCIDatagram_CreateHndFct *dgramCreateHnd; VMCIDatagram_CreateHndPrivFct *dgramCreateHndPriv; VMCIDatagram_DestroyHndFct *dgramDestroyHnd; VMCIDatagram_SendFct *dgramSend; VMCI_GetContextIDFct *getContextId; VMCI_VersionFct *version; VMCIEvent_SubscribeFct *eventSubscribe; VMCIEvent_UnsubscribeFct *eventUnsubscribe; VMCIQPair_AllocFct *qpairAlloc; VMCIQPair_DetachFct *qpairDetach; VMCIQPair_GetProduceIndexesFct *qpairGetProduceIndexes; VMCIQPair_GetConsumeIndexesFct *qpairGetConsumeIndexes; VMCIQPair_ProduceFreeSpaceFct *qpairProduceFreeSpace; VMCIQPair_ProduceBufReadyFct *qpairProduceBufReady; VMCIQPair_ConsumeFreeSpaceFct *qpairConsumeFreeSpace; VMCIQPair_ConsumeBufReadyFct *qpairConsumeBufReady; VMCIQPair_EnqueueFct *qpairEnqueue; VMCIQPair_DequeueFct *qpairDequeue; VMCIQPair_PeekFct *qpairPeek; VMCIQPair_EnqueueVFct *qpairEnqueueV; VMCIQPair_DequeueVFct *qpairDequeueV; VMCIQPair_PeekVFct *qpairPeekV; VMCI_ContextID2HostVmIDFct *contextID2HostVmID; VMCI_IsContextOwnerFct *isContextOwner; VMCIContext_GetPrivFlagsFct *contextGetPrivFlags; } VMCIDeviceGetInfoVer1; /* Version 2. */ typedef struct VMCIDeviceGetInfoVer2 { VMCIDoorbell_CreateFct *doorbellCreate; VMCIDoorbell_DestroyFct *doorbellDestroy; VMCIDoorbell_NotifyFct *doorbellNotify; } VMCIDeviceGetInfoVer2; typedef struct VMCIDeviceGetInfoHdr { /* Requested API version on input, supported version on output. */ uint32 apiVersion; VMCI_DeviceShutdownFn *deviceShutdownCB; void *userData; void *deviceRegistration; } VMCIDeviceGetInfoHdr; /* Combination of all versions. */ typedef struct VMCIDeviceGetInfo { VMCIDeviceGetInfoHdr hdr; VMCIDeviceGetInfoVer1 ver1; VMCIDeviceGetInfoVer2 ver2; } VMCIDeviceGetInfo; #endif // _WIN32 && WINNT_DDK #ifdef __APPLE__ /* * Mac OS ioctl definitions. 
* * Mac OS defines _IO* macros, and the core kernel code uses the size encoded * in the ioctl value to copy the memory back and forth (depending on the * direction encoded in the ioctl value) between the user and kernel address * spaces. * See iocontrolsMacOS.h for details on how this is done. We use sockets only * for vmci. */ #include <sys/ioccom.h> enum VMCrossTalkSockOpt { VMCI_SO_VERSION = 0, VMCI_SO_CONTEXT = IOCTL_VMCI_INIT_CONTEXT, VMCI_SO_NOTIFY_RESOURCE = IOCTL_VMCI_NOTIFY_RESOURCE, VMCI_SO_NOTIFICATIONS_RECEIVE = IOCTL_VMCI_NOTIFICATIONS_RECEIVE, VMCI_SO_VERSION2 = IOCTL_VMCI_VERSION2, VMCI_SO_QUEUEPAIR_ALLOC = IOCTL_VMCI_QUEUEPAIR_ALLOC, VMCI_SO_QUEUEPAIR_SETVA = IOCTL_VMCI_QUEUEPAIR_SETVA, VMCI_SO_QUEUEPAIR_SETPAGEFILE = IOCTL_VMCI_QUEUEPAIR_SETPAGEFILE, VMCI_SO_QUEUEPAIR_DETACH = IOCTL_VMCI_QUEUEPAIR_DETACH, VMCI_SO_DATAGRAM_SEND = IOCTL_VMCI_DATAGRAM_SEND, VMCI_SO_DATAGRAM_RECEIVE = IOCTL_VMCI_DATAGRAM_RECEIVE, VMCI_SO_DATAGRAM_REQUEST_MAP = IOCTL_VMCI_DATAGRAM_REQUEST_MAP, VMCI_SO_DATAGRAM_REMOVE_MAP = IOCTL_VMCI_DATAGRAM_REMOVE_MAP, VMCI_SO_CTX_ADD_NOTIFICATION = IOCTL_VMCI_CTX_ADD_NOTIFICATION, VMCI_SO_CTX_REMOVE_NOTIFICATION = IOCTL_VMCI_CTX_REMOVE_NOTIFICATION, VMCI_SO_CTX_GET_CPT_STATE = IOCTL_VMCI_CTX_GET_CPT_STATE, VMCI_SO_CTX_SET_CPT_STATE = IOCTL_VMCI_CTX_SET_CPT_STATE, VMCI_SO_GET_CONTEXT_ID = IOCTL_VMCI_GET_CONTEXT_ID, VMCI_SO_USERFD, }; #define VMCI_MACOS_HOST_DEVICE "com.vmware.kext.vmci" #endif /* Clean up helper macros */ #undef IOCTLCMD #if defined __cplusplus } // extern "C" #endif #endif // ifndef _VMCI_IOCONTROLS_H_ vmhgfs-only/shared/kernelStubsSal.h 0000444 0000000 0000000 00000012205 13432725330 016405 0 ustar root root /********************************************************* * Copyright (C) 2015-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * kernelStubsSal.h * * Contains definitions source annotation language definitions for kernel drivers. * This solves two issues: * 1. Microsoft changed their annotation language from SAL 1.0 (original one * widely distributed by the Windows team) to their more final SAL 2.0 * langauge (championed by the VS team). * 2. We want these annotations to do nothing during non-Win32 compiles. * * A longer term goal is to rationalize this into Bora. 
*/ #ifndef __KERNELSTUBSSAL_H__ #define __KERNELSTUBSSAL_H__ #if defined(_WIN32) # include <DriverSpecs.h> # if !defined(_SAL_VERSION) # define _SAL_VERSION 10 # endif #endif #if !defined(_SAL_VERSION) || (defined(_SAL_VERSION) && _SAL_VERSION == 10) #define _In_ #define _In_opt_ #define _In_reads_bytes_(count) #define _In_reads_bytes_opt_(count) #define _In_z_ #define _In_opt_z_ #define _Out_ #define _Out_opt_ #define _Out_writes_bytes_(capcount) #define _Out_writes_bytes_opt_(capcount) #define _Out_writes_bytes_to_(cap, count) #define _Out_writes_bytes_to_opt_(cap, count) #define _Out_bytecap_post_bytecount_(cap, count) #define _Out_writes_z_(cap) #define _Out_writes_opt_z_(cap) #define _Out_z_cap_(e) #define _Outptr_result_buffer_(count) #define _Outptr_result_bytebuffer_(count) #define _Outptr_result_bytebuffer_maybenull_(count) #define _Outptr_opt_result_buffer_(count) #define _Outptr_opt_result_bytebuffer_(count) #define _Outptr_opt_result_bytebuffer_maybenull_(count) #define _COM_Outptr_ #define _Inout_ #define _Inout_updates_bytes_(e) #define _Inout_z_cap_(e) #define _Post_z_count_(e) #define _Ret_writes_z_(e) #define _Ret_writes_maybenull_z_(e) #define _Ret_maybenull_ #define _Ret_maybenull_z_ #define _Ret_range_(l,h) #define _Success_(expr) #define _Check_return_ #define _Must_inspect_result_ #define _Group_(annos) #define _When_(expr, annos) #define _Always_(annos) #define _Printf_format_string_ #define _Use_decl_annotations_ #define _Dispatch_type_(mj) #define _Function_class_(c) #define _Requires_lock_held_(cs) #define _Requires_lock_not_held_(cs) #define _Acquires_lock_(l) #define _Releases_lock_(l) #define _IRQL_requires_max_(i) #define _IRQL_requires_(i) #define _IRQL_requires_same_ #define _Analysis_assume_(e) #define _Pre_notnull_ #define _At_(expr,annos) #else // Sal 2.0 path - everything is already defined. 
#endif // _SAL_VERSION // Now define our own annotations #if !defined(_SAL_VERSION) || (defined(_SAL_VERSION) && _SAL_VERSION == 10) #define _When_windrv_(annos) #define _Ret_allocates_malloc_mem_opt_bytecap_(_Size) #define _Ret_allocates_malloc_mem_opt_bytecount_(_Size) #define _Ret_allocates_malloc_mem_opt_bytecap_post_bytecount_(_Cap,_Count) #define _Ret_allocates_malloc_mem_opt_z_bytecount_(_Size) #define _Ret_allocates_malloc_mem_opt_z_ #define _In_frees_malloc_mem_opt_ #else #define _When_windrv_(annos) annos #define _Ret_allocates_malloc_mem_opt_bytecap_(_Cap) __drv_allocatesMem("Memory") _Must_inspect_result_ _Ret_opt_bytecap_(_Cap) #define _Ret_allocates_malloc_mem_opt_bytecount_(_Count) __drv_allocatesMem("Memory") _Must_inspect_result_ _Ret_opt_bytecount_(_Count) #define _Ret_allocates_malloc_mem_opt_bytecap_post_bytecount_(_Cap,_Count) __drv_allocatesMem("Memory") _Must_inspect_result_ _Ret_opt_bytecap_(_Cap) _Ret_opt_bytecount_(_Count) #define _Ret_allocates_malloc_mem_opt_z_bytecount_(_Count) __drv_allocatesMem("Memory") _Must_inspect_result_ _Ret_opt_z_bytecount_(_Count) #define _Ret_allocates_malloc_mem_opt_z_ __drv_allocatesMem("Memory") _Must_inspect_result_ _Ret_opt_z_ #define _In_frees_malloc_mem_opt_ __drv_freesMem("Memory") _Pre_maybenull_ _Post_invalid_ #endif // _SAL_VERSION // Best we can do for reallocate with simple annotations: assume old size was fully initialized. #define _Ret_reallocates_malloc_mem_opt_newbytecap_oldbytecap_(_NewSize, _OldSize) _Ret_allocates_malloc_mem_opt_bytecap_post_bytecount_(_NewSize, _OldSize <= _NewSize ? 
_OldSize : _NewSize) #define _Ret_reallocates_malloc_mem_opt_newbytecap_(_NewSize) _Ret_allocates_malloc_mem_opt_z_bytecount_(_NewSize) #define _In_reallocates_malloc_mem_opt_oldptr_ _In_frees_malloc_mem_opt_ #endif // __KERNELSTUBSSAL_H__ vmhgfs-only/shared/vm_assert.h 0000444 0000000 0000000 00000024303 13432725350 015453 0 ustar root root /********************************************************* * Copyright (C) 1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_assert.h -- * * The basic assertion facility for all VMware code. * * For proper use, see bora/doc/assert and * http://vmweb.vmware.com/~mts/WebSite/guide/programming/asserts.html. */ #ifndef _VM_ASSERT_H_ #define _VM_ASSERT_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" // XXX not necessary except some places include vm_assert.h improperly #include "vm_basic_types.h" #ifdef __cplusplus extern "C" { #endif /* * Some bits of vmcore are used in VMKernel code and cannot have * the VMKERNEL define due to other header dependencies. 
*/ #if defined(VMKERNEL) && !defined(VMKPANIC) #define VMKPANIC 1 #endif /* * Internal macros, functions, and strings * * The monitor wants to save space at call sites, so it has specialized * functions for each situation. User level wants to save on implementation * so it uses generic functions. */ #if !defined VMM || defined MONITOR_APP // { #if defined (VMKPANIC) #include "vmk_assert.h" #else /* !VMKPANIC */ #define _ASSERT_PANIC(name) \ Panic(_##name##Fmt "\n", __FILE__, __LINE__) #define _ASSERT_PANIC_BUG(bug, name) \ Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug) #define _ASSERT_PANIC_NORETURN(name) \ Panic(_##name##Fmt "\n", __FILE__, __LINE__) #define _ASSERT_PANIC_BUG_NORETURN(bug, name) \ Panic(_##name##Fmt " bugNr=%d\n", __FILE__, __LINE__, bug) #endif /* VMKPANIC */ #endif // } // These strings don't have newline so that a bug can be tacked on. #define _AssertPanicFmt "PANIC %s:%d" #define _AssertAssertFmt "ASSERT %s:%d" #define _AssertVerifyFmt "VERIFY %s:%d" #define _AssertNotImplementedFmt "NOT_IMPLEMENTED %s:%d" #define _AssertNotReachedFmt "NOT_REACHED %s:%d" #define _AssertMemAllocFmt "MEM_ALLOC %s:%d" #define _AssertNotTestedFmt "NOT_TESTED %s:%d" /* * Panic and log functions */ void Log(const char *fmt, ...) PRINTF_DECL(1, 2); void Warning(const char *fmt, ...) PRINTF_DECL(1, 2); #if defined VMKPANIC void Panic_SaveRegs(void); NORETURN void Panic_NoSave(const char *fmt, ...) PRINTF_DECL(1, 2); #define Panic(fmt...) do { \ Panic_SaveRegs(); \ Panic_NoSave(fmt); \ } while(0) #else NORETURN void Panic(const char *fmt, ...) PRINTF_DECL(1, 2); #endif void LogThrottled(uint32 *count, const char *fmt, ...) PRINTF_DECL(2, 3); void WarningThrottled(uint32 *count, const char *fmt, ...) PRINTF_DECL(2, 3); #ifndef ASSERT_IFNOT /* * PR 271512: When compiling with gcc, catch assignments inside an ASSERT. * * 'UNLIKELY' is defined with __builtin_expect, which does not warn when * passed an assignment (gcc bug 36050). 
To get around this, we put 'cond' * in an 'if' statement and make sure it never gets executed by putting * that inside of 'if (0)'. We use gcc's statement expression syntax to * make ASSERT an expression because some code uses it that way. * * Since statement expression syntax is a gcc extension and since it's * not clear if this is a problem with other compilers, the ASSERT * definition was not changed for them. Using a bare 'cond' with the * ternary operator may provide a solution. */ #ifdef __GNUC__ #define ASSERT_IFNOT(cond, panic) \ ({if (UNLIKELY(!(cond))) { panic; if (0) { if (cond) {;}}} (void)0;}) #else #define ASSERT_IFNOT(cond, panic) \ (UNLIKELY(!(cond)) ? (panic) : (void)0) #endif #endif /* * Assert, panic, and log macros * * Some of these are redefined below undef !VMX86_DEBUG. * ASSERT() is special cased because of interaction with Windows DDK. */ #if defined VMX86_DEBUG #undef ASSERT #define ASSERT(cond) ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertAssert)) #define ASSERT_BUG(bug, cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG(bug, AssertAssert)) #endif #undef VERIFY #define VERIFY(cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC_NORETURN(AssertVerify)) #define VERIFY_BUG(bug, cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC_BUG_NORETURN(bug, AssertVerify)) #define PANIC() _ASSERT_PANIC(AssertPanic) #define PANIC_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertPanic) #define ASSERT_NOT_IMPLEMENTED(cond) \ ASSERT_IFNOT(cond, NOT_IMPLEMENTED()) #if defined VMKPANIC || defined VMM #define NOT_IMPLEMENTED() _ASSERT_PANIC_NORETURN(AssertNotImplemented) #else #define NOT_IMPLEMENTED() _ASSERT_PANIC(AssertNotImplemented) #endif #if defined VMM #define NOT_IMPLEMENTED_BUG(bug) \ _ASSERT_PANIC_BUG_NORETURN(bug, AssertNotImplemented) #else #define NOT_IMPLEMENTED_BUG(bug) _ASSERT_PANIC_BUG(bug, AssertNotImplemented) #endif #if defined VMKPANIC || defined VMM #define NOT_REACHED() _ASSERT_PANIC_NORETURN(AssertNotReached) #else #define NOT_REACHED() _ASSERT_PANIC(AssertNotReached) #endif 
#define ASSERT_MEM_ALLOC(cond) \ ASSERT_IFNOT(cond, _ASSERT_PANIC(AssertMemAlloc)) #ifdef VMX86_DEVEL #define NOT_TESTED() Warning(_AssertNotTestedFmt "\n", __FILE__, __LINE__) #else #define NOT_TESTED() Log(_AssertNotTestedFmt "\n", __FILE__, __LINE__) #endif #define ASSERT_NO_INTERRUPTS() ASSERT(!INTERRUPTS_ENABLED()) #define ASSERT_HAS_INTERRUPTS() ASSERT(INTERRUPTS_ENABLED()) #define ASSERT_NOT_TESTED(cond) (UNLIKELY(!(cond)) ? NOT_TESTED() : (void)0) #define NOT_TESTED_ONCE() DO_ONCE(NOT_TESTED()) #define NOT_TESTED_1024() \ do { \ static uint16 count = 0; \ if (UNLIKELY(count == 0)) { NOT_TESTED(); } \ count = (count + 1) & 1023; \ } while (0) #define LOG_ONCE(_s) DO_ONCE(Log _s) /* * Redefine macros that are only in debug versions */ #if !defined VMX86_DEBUG // { #undef ASSERT #define ASSERT(cond) ((void)0) #define ASSERT_BUG(bug, cond) ((void)0) /* * Expand NOT_REACHED() as appropriate for each situation. * * Mainly, we want the compiler to infer the same control-flow * information as it would from Panic(). Otherwise, different * compilation options will lead to different control-flow-derived * errors, causing some make targets to fail while others succeed. * * VC++ has the __assume() built-in function which we don't trust * (see bug 43485); gcc has no such construct; we just panic in * userlevel code. The monitor doesn't want to pay the size penalty * (measured at 212 bytes for the release vmm for a minimal infinite * loop; panic would cost even more) so it does without and lives * with the inconsistency. 
*/ #if defined VMKPANIC || defined VMM #undef NOT_REACHED #if defined __GNUC__ && (__GNUC__ > 4 || __GNUC__ == 4 && __GNUC_MINOR__ >= 5) #define NOT_REACHED() (__builtin_unreachable()) #else #define NOT_REACHED() ((void)0) #endif #else // keep debug definition #endif #undef LOG_UNEXPECTED #define LOG_UNEXPECTED(bug) ((void)0) #undef ASSERT_NOT_TESTED #define ASSERT_NOT_TESTED(cond) ((void)0) #undef NOT_TESTED #define NOT_TESTED() ((void)0) #undef NOT_TESTED_ONCE #define NOT_TESTED_ONCE() ((void)0) #undef NOT_TESTED_1024 #define NOT_TESTED_1024() ((void)0) #endif // !VMX86_DEBUG } /* * Compile-time assertions. * * ASSERT_ON_COMPILE does not use the common * switch (0) { case 0: case (e): ; } trick because some compilers (e.g. MSVC) * generate code for it. * * The implementation uses both enum and typedef because the typedef alone is * insufficient; gcc allows arrays to be declared with non-constant expressions * (even in typedefs, where it makes no sense). * * NOTE: if GCC ever changes so that it ignores unused types altogether, this * assert might not fire! We explicitly mark it as unused because GCC 4.8+ * uses -Wunused-local-typedefs as part of -Wall, which means the typedef will * generate a warning. */ #if defined(_Static_assert) || defined(__cplusplus) || \ !defined(__GNUC__) || __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 6) #define ASSERT_ON_COMPILE(e) \ do { \ enum { AssertOnCompileMisused = ((e) ? 1 : -1) }; \ UNUSED_TYPE(typedef char AssertOnCompileFailed[AssertOnCompileMisused]); \ } while (0) #else #define ASSERT_ON_COMPILE(e) \ do { \ _Static_assert(e, #e); \ } while (0) #endif /* * To put an ASSERT_ON_COMPILE() outside a function, wrap it * in MY_ASSERTS(). The first parameter must be unique in * each .c file where it appears. 
For example, * * MY_ASSERTS(FS3_INT, * ASSERT_ON_COMPILE(sizeof(FS3_DiskLock) == 128); * ASSERT_ON_COMPILE(sizeof(FS3_DiskLockReserved) == DISK_BLOCK_SIZE); * ASSERT_ON_COMPILE(sizeof(FS3_DiskBlock) == DISK_BLOCK_SIZE); * ASSERT_ON_COMPILE(sizeof(Hardware_DMIUUID) == 16); * ) * * Caution: ASSERT() within MY_ASSERTS() is silently ignored. * The same goes for anything else not evaluated at compile time. */ #define MY_ASSERTS(name, assertions) \ static INLINE void name(void) { \ assertions \ } #ifdef __cplusplus } /* extern "C" */ #endif #endif /* ifndef _VM_ASSERT_H_ */ vmhgfs-only/shared/compat_sock.h 0000444 0000000 0000000 00000006002 13432725347 015754 0 ustar root root /********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SOCK_H__ # define __COMPAT_SOCK_H__ #include <linux/stddef.h> /* for NULL */ #include <net/sock.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) static inline wait_queue_head_t *sk_sleep(struct sock *sk) { return sk->sk_sleep; } #endif /* * Prior to 2.6.24, there was no sock network namespace member. In 2.6.26, it * was hidden behind accessor functions so that its behavior could vary * depending on the value of CONFIG_NET_NS. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) # define compat_sock_net(sk) sock_net(sk) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) # define compat_sock_net(sk) sk->sk_net #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) #ifndef CONFIG_FILTER # define sk_filter(sk, skb, needlock) 0 #endif /* Taken from 2.6.16's sock.h and modified for macro. */ # define compat_sk_receive_skb(sk, skb, nested) \ ({ \ int rc = NET_RX_SUCCESS; \ \ if (sk_filter(sk, skb, 0)) { \ kfree_skb(skb); \ } else { \ skb->dev = NULL; \ bh_lock_sock(sk); \ if (!sock_owned_by_user(sk)) { \ rc = (sk)->sk_backlog_rcv(sk, skb); \ } else { \ sk_add_backlog(sk, skb); \ } \ bh_unlock_sock(sk); \ } \ \ sock_put(sk); \ rc; \ }) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) # define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb) #else # define compat_sk_receive_skb(sk, skb, nested) sk_receive_skb(sk, skb, nested) #endif #endif /* __COMPAT_SOCK_H__ */ vmhgfs-only/shared/compat_highmem.h 0000444 0000000 0000000 00000002423 13432725347 016436 0 ustar root root /********************************************************* * Copyright (C) 2012 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_HIGHMEM_H__ # define __COMPAT_HIGHMEM_H__ #include <linux/highmem.h> #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) # define compat_kmap_atomic(_page) kmap_atomic(_page) # define compat_kunmap_atomic(_page) kunmap_atomic(_page) #else # define compat_kmap_atomic(_page) kmap_atomic((_page), KM_USER0) # define compat_kunmap_atomic(_page) kunmap_atomic((_page), KM_USER0) #endif #endif /* __COMPAT_HIGHMEM_H__ */ vmhgfs-only/shared/compat_namei.h 0000444 0000000 0000000 00000003416 13432725347 016114 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_NAMEI_H__ # define __COMPAT_NAMEI_H__ #include <linux/namei.h> /* * In 2.6.25-rc2, dentry and mount objects were removed from the nameidata * struct. They were both replaced with a struct path. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) #define compat_vmw_nd_to_dentry(nd) (nd).path.dentry #else #define compat_vmw_nd_to_dentry(nd) (nd).dentry #endif /* In 2.6.25-rc2, path_release(&nd) was replaced with path_put(&nd.path). */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) #define compat_path_release(nd) path_put(&(nd)->path) #else #define compat_path_release(nd) path_release(nd) #endif /* path_lookup was removed in 2.6.39 merge window VFS merge */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) #define compat_path_lookup(name, flags, nd) kern_path(name, flags, &((nd)->path)) #else #define compat_path_lookup(name, flags, nd) path_lookup(name, flags, nd) #endif #endif /* __COMPAT_NAMEI_H__ */ vmhgfs-only/shared/compat_kernel.h 0000444 0000000 0000000 00000002735 13432725347 016306 0 ustar root root /********************************************************* * Copyright (C) 2004 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_KERNEL_H__ # define __COMPAT_KERNEL_H__ #include <asm/unistd.h> #include <linux/kernel.h> /* * container_of was introduced in 2.5.28 but it's easier to check like this. 
*/ #ifndef container_of #define container_of(ptr, type, member) ({ \ const typeof( ((type *)0)->member ) *__mptr = (ptr); \ (type *)( (char *)__mptr - offsetof(type,member) );}) #endif /* * vsnprintf became available in 2.4.10. For older kernels, just fall back on * vsprintf. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10) #define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args) #endif #endif /* __COMPAT_KERNEL_H__ */ vmhgfs-only/shared/circList.h 0000444 0000000 0000000 00000025262 13432725346 015236 0 ustar root root /********************************************************* * Copyright (C) 1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * circList.h -- * * macros, prototypes and struct definitions for double-linked * circular lists. */ #ifndef _CIRCLIST_H_ #define _CIRCLIST_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmware.h" #if defined(__cplusplus) extern "C" { #endif typedef struct ListItem { struct ListItem *prev; struct ListItem *next; } ListItem; /* *---------------------------------------------------------------------- * * CircList_IsEmpty -- * * A NULL list is an empty list. 
* * Result: * TRUE if list is empty, FALSE otherwise. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE Bool CircList_IsEmpty(const ListItem *item) // IN { return item == NULL; } /* *---------------------------------------------------------------------- * * CircList_InitItem -- * * Initialize item as a single-element circular list. * * Result: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE void CircList_InitItem(ListItem *item) // OUT { item->prev = item->next = item; } /* *---------------------------------------------------------------------- * * CircList_First -- * * Return first item in the list. * * Result: * First item. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE ListItem * CircList_First(ListItem *item) // IN { return item; } /* *---------------------------------------------------------------------- * * CircList_Last -- * * Return last item in the list. * * Result: * Last item. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE ListItem * CircList_Last(ListItem *item) { return item->prev; } /* * CIRC_LIST_CONTAINER - get the struct for this entry (like list_entry) * @ptr: the &struct ListItem pointer. * @type: the type of the struct this is embedded in. * @member: the name of the list struct within the struct. */ #define CIRC_LIST_CONTAINER(ptr, type, member) \ VMW_CONTAINER_OF(ptr, type, member) /* * Historical name, left here to reduce churn. * TODO: remove, all LIST_CONTAINER uses should be * VMW_CONTAINER_OF and stop depending on circList.h * to provide the definition. */ #define LIST_CONTAINER(ptr, type, member) VMW_CONTAINER_OF(ptr, type, member) /* * LIST_SCAN_FROM scans the list from "from" up until "until". * The loop variable p should not be destroyed in the process. 
* "from" is an element in the list where to start scanning. * "until" is the element where search should stop. * member is the field to use for the search - either "next" or "prev". */ #define CIRC_LIST_SCAN_FROM(p, from, until, member) \ for (p = (from); (p) != NULL; \ (p) = (((p)->member == (until)) ? NULL : (p)->member)) /* scan the entire list (non-destructively) */ #define CIRC_LIST_SCAN(p, l) \ CIRC_LIST_SCAN_FROM(p, CircList_First(l), CircList_First(l), next) /* scan the entire list where loop element may be destroyed */ #define CIRC_LIST_SCAN_SAFE(p, pn, l) \ if (!CircList_IsEmpty(l)) \ for (p = (l), (pn) = CircList_Next(p, l); (p) != NULL; \ (p) = (pn), (pn) = CircList_Next(p, l)) /* scan the entire list backwards where loop element may be destroyed */ #define CIRC_LIST_SCAN_BACK_SAFE(p, pn, l) \ if (!CircList_IsEmpty(l)) \ for (p = CircList_Last(l), (pn) = CircList_Prev(p, l); (p) != NULL; \ (p) = (pn), (pn) = CircList_Prev(p, l)) /* *---------------------------------------------------------------------- * * CircList_Next -- * * Returns the next member of a doubly linked list, or NULL if last. * Assumes: p is member of the list headed by head. * * Result: * If head or p is NULL, return NULL. Otherwise, * next list member (or null if last). * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE ListItem * CircList_Next(ListItem *p, // IN ListItem *head) // IN { if (head == NULL || p == NULL) { return NULL; } /* both p and head are non-null */ p = p->next; return p == head ? NULL : p; } /* *---------------------------------------------------------------------- * * CircList_Prev -- * * Returns the prev member of a doubly linked list, or NULL if first. * Assumes: p is member of the list headed by head. * * Result: * If head or prev is NULL, return NULL. Otherwise, * prev list member (or null if first). * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static INLINE ListItem * CircList_Prev(ListItem *p, // IN ListItem *head) // IN { if (head == NULL || p == NULL) { return NULL; } /* both p and head are non-null */ return p == head ? NULL : p->prev; } /* *---------------------------------------------------------------------- * * CircList_DeleteItem -- * * Deletes a member of a doubly linked list, possibly modifies the * list header itself. * Assumes neither p nor headp is null and p is a member of *headp. * * Result: * None * * Side effects: * Modifies *headp. * *---------------------------------------------------------------------- */ static INLINE void CircList_DeleteItem(ListItem *p, // IN ListItem **headp) // IN/OUT { ListItem *next; ASSERT(p != NULL); ASSERT(headp != NULL); next = p->next; if (p == next) { *headp = NULL; } else { next->prev = p->prev; p->prev->next = next; if (*headp == p) { *headp = next; } } } /* *---------------------------------------------------------------------- * * CircList_Queue -- * * Adds a new member to the back of a doubly linked list (queue) * Assumes neither p nor headp is null and p is not a member of *headp. * * Result: * None * * Side effects: * Modifies *headp. * *---------------------------------------------------------------------- */ static INLINE void CircList_Queue(ListItem *p, // IN ListItem **headp) // IN/OUT { ListItem *head; head = *headp; if (CircList_IsEmpty(head)) { CircList_InitItem(p); *headp = p; } else { p->prev = head->prev; p->next = head; p->prev->next = p; head->prev = p; } } /* *---------------------------------------------------------------------- * * CircList_Push -- * * Adds a new member to the front of a doubly linked list (stack) * Assumes neither p nor headp is null and p is not a member of *headp. * * Result: * None * * Side effects: * Modifies *headp. 
* *---------------------------------------------------------------------- */ static INLINE void CircList_Push(ListItem *p, // IN ListItem **headp) // IN/OUT { CircList_Queue(p, headp); *headp = p; } /* *---------------------------------------------------------------------- * * CircList_Splice -- * * Make a single list {l1 l2} from {l1} and {l2} and return it. * It is okay for one or both lists to be NULL. * No checking is done. It is assumed that l1 and l2 are two * distinct lists. * * Result: * A list { l1 l2 }. * * Side effects: * Modifies l1 and l2 list pointers. * *---------------------------------------------------------------------- */ static INLINE ListItem * CircList_Splice(ListItem *l1, // IN ListItem *l2) // IN { ListItem *l1Last, *l2Last; if (CircList_IsEmpty(l1)) { return l2; } if (CircList_IsEmpty(l2)) { return l1; } l1Last = l1->prev; /* last elem of l1 */ l2Last = l2->prev; /* last elem of l2 */ /* * l1 -> ... -> l1Last l2 -> ... l2Last */ l1Last->next = l2; l2->prev = l1Last; l1->prev = l2Last; l2Last->next = l1; return l1; } #if 0 /* Presently unused, enable if a use is found */ /* *---------------------------------------------------------------------- * * CircList_Split -- * * Make a list l = {l1 l2} into two separate lists {l1} and {l2}, where: * l = { ... x -> p -> ... } split into: * l1 = { ... -> x } * l2 = { p -> ... } * Assumes neither p nor l is null and p is a member of l. * If p is the first element of l, then l1 will be NULL. * * Result: * None. * * Side effects: * Sets *l1p and *l2p to the resulting two lists. * Modifies l's pointers. 
* *---------------------------------------------------------------------- */ static INLINE void CircList_Split(ListItem *p, // IN ListItem *l, // IN ListItem **l1p, // OUT ListItem **l2p) // OUT { ListItem *last; if (p == CircList_First(l)) { /* first element */ *l1p = NULL; *l2p = l; return; } last = l->prev; *l1p = l; p->prev->next = l; l->prev = p->prev; *l2p = p; p->prev = last; last->next = p; } #endif /* *---------------------------------------------------------------------- * * CircList_Size -- * * Return the number of items in the list. * * Result: * The number of items in the list. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE int CircList_Size(ListItem *head) // IN { ListItem *li; int ret = 0; CIRC_LIST_SCAN(li, head) { ret++; } return ret; } #if defined(__cplusplus) } // extern "C" #endif #endif /* _CIRCLIST_H_ */ vmhgfs-only/shared/vmware.h 0000444 0000000 0000000 00000003507 13432725350 014754 0 ustar root root /********************************************************* * Copyright (C) 2003-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware.h -- * * Standard include file for VMware source code. 
*/ #ifndef _VMWARE_H_ #define _VMWARE_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "vm_basic_defs.h" #include "vm_assert.h" /* * Global error codes. Currently used internally, but may be exported * to customers one day, like VM_E_XXX in vmcontrol_constants.h */ typedef enum VMwareStatus { VMWARE_STATUS_SUCCESS, /* success */ VMWARE_STATUS_ERROR, /* generic error */ VMWARE_STATUS_NOMEM, /* generic memory allocation error */ VMWARE_STATUS_INSUFFICIENT_RESOURCES, /* internal or system resource limit exceeded */ VMWARE_STATUS_INVALID_ARGS /* invalid arguments */ } VMwareStatus; #define VMWARE_SUCCESS(s) ((s) == VMWARE_STATUS_SUCCESS) #endif // ifndef _VMWARE_H_ vmhgfs-only/shared/compat_netdevice.h 0000444 0000000 0000000 00000024324 13432725347 016772 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_NETDEVICE_H__ # define __COMPAT_NETDEVICE_H__ #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/pci.h> /* * The enet_statistics structure moved from linux/if_ether.h to * linux/netdevice.h and is renamed net_device_stats in 2.1.25 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 25) # include <linux/if_ether.h> # define net_device_stats enet_statistics #endif /* The netif_rx_ni() API appeared in 2.4.8 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 8) # define netif_rx_ni netif_rx #endif /* The device struct was renamed net_device in 2.3.14 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14) # define net_device device #endif /* * SET_MODULE_OWNER appeared sometime during 2.3.x. It was setting * dev->owner = THIS_MODULE until 2.5.70, where netdevice refcounting * was completely changed. SET_MODULE_OWNER was nop for whole * 2.6.x series, and finally disappeared in 2.6.24. * * MOD_xxx_USE_COUNT wrappers are here, as they must be mutually * exclusive with SET_MODULE_OWNER call. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) # define COMPAT_SET_MODULE_OWNER(dev) do {} while (0) # define COMPAT_NETDEV_MOD_INC_USE_COUNT MOD_INC_USE_COUNT # define COMPAT_NETDEV_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT #else # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) # define COMPAT_SET_MODULE_OWNER(dev) SET_MODULE_OWNER(dev) # else # define COMPAT_SET_MODULE_OWNER(dev) do {} while (0) # endif # define COMPAT_NETDEV_MOD_INC_USE_COUNT do {} while (0) # define COMPAT_NETDEV_MOD_DEC_USE_COUNT do {} while (0) #endif /* * SET_NETDEV_DEV appeared sometime during 2.5.x, and later was * crossported to various 2.4.x kernels (as dummy macro). */ #ifdef SET_NETDEV_DEV # define COMPAT_SET_NETDEV_DEV(dev, pdev) SET_NETDEV_DEV(dev, pdev) #else # define COMPAT_SET_NETDEV_DEV(dev, pdev) do {} while (0) #endif /* * Build alloc_etherdev API on the top of init_etherdev. For 2.0.x kernels * we must provide dummy init method, otherwise register_netdev does * nothing. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) int vmware_dummy_init(struct net_device *dev) { return 0; } #endif static inline struct net_device* compat_alloc_etherdev(int priv_size) { struct net_device* dev; int size = sizeof *dev + priv_size; /* * The name is dynamically allocated before 2.4.0, but * is an embedded array in later kernels. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) size += sizeof("ethXXXXXXX"); #endif dev = kmalloc(size, GFP_KERNEL); if (dev) { memset(dev, 0, size); if (priv_size) { dev->priv = dev + 1; } #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) dev->name = (char *)(dev + 1) + priv_size; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) dev->init = vmware_dummy_init; #endif if (init_etherdev(dev, 0) != dev) { kfree(dev); dev = NULL; } } return dev; } #else #define compat_alloc_etherdev(sz) alloc_etherdev(sz) #endif /* * alloc_netdev and free_netdev are there since 2.4.23. Their use is mandatory * since 2.6.24. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 23) static inline struct net_device * compat_alloc_netdev(int priv_size, const char *mask, void (*setup)(struct net_device *)) { struct net_device *dev; int netdev_size = sizeof *dev; int alloc_size; # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) netdev_size += IFNAMSIZ; # endif alloc_size = netdev_size + priv_size; dev = kmalloc(alloc_size, GFP_KERNEL); if (dev) { memset(dev, 0, alloc_size); dev->priv = (char*)dev + netdev_size; setup(dev); # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) dev->name = (char*)(dev + 1); # endif strcpy(dev->name, mask); } return dev; } # define compat_free_netdev(dev) kfree(dev) #else # define compat_alloc_netdev(size, mask, setup) alloc_netdev(size, mask, setup) # define compat_free_netdev(dev) free_netdev(dev) #endif /* netdev_priv() appeared in 2.6.3 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 3) # define compat_netdev_priv(netdev) (netdev)->priv #else # define compat_netdev_priv(netdev) netdev_priv(netdev) #endif /* * In 3.1 merge window feature maros were removed from mainline, * so let's add back ones we care about. 
*/ #if !defined(HAVE_NET_DEVICE_OPS) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) # define HAVE_NET_DEVICE_OPS 1 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) # define COMPAT_NETDEV_TX_OK NETDEV_TX_OK # define COMPAT_NETDEV_TX_BUSY NETDEV_TX_BUSY #else # define COMPAT_NETDEV_TX_OK 0 # define COMPAT_NETDEV_TX_BUSY 1 #endif /* unregister_netdevice_notifier was not safe prior to 2.6.17 */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) && \ !defined(ATOMIC_NOTIFIER_INIT) /* pre 2.6.17 and not patched */ static inline int compat_unregister_netdevice_notifier(struct notifier_block *nb) { int err; rtnl_lock(); err = unregister_netdevice_notifier(nb); rtnl_unlock(); return err; } #else /* post 2.6.17 or patched */ #define compat_unregister_netdevice_notifier(_nb) \ unregister_netdevice_notifier(_nb); #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) || defined(__VMKLNX__) # define compat_netif_napi_add(dev, napi, poll, quota) \ netif_napi_add(dev, napi, poll, quota) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) || \ defined VMW_NETIF_SINGLE_NAPI_PARM # define compat_napi_complete(dev, napi) napi_complete(napi) # define compat_napi_schedule(dev, napi) napi_schedule(napi) # else # define compat_napi_complete(dev, napi) netif_rx_complete(dev, napi) # define compat_napi_schedule(dev, napi) netif_rx_schedule(dev, napi) # endif # define compat_napi_enable(dev, napi) napi_enable(napi) # define compat_napi_disable(dev, napi) napi_disable(napi) #else # define compat_napi_complete(dev, napi) netif_rx_complete(dev) # define compat_napi_schedule(dev, napi) netif_rx_schedule(dev) # define compat_napi_enable(dev, napi) netif_poll_enable(dev) # define compat_napi_disable(dev, napi) netif_poll_disable(dev) /* RedHat ported GRO to 2.6.18 bringing new napi_struct with it */ # if defined NETIF_F_GRO # define compat_netif_napi_add(netdev, napi, pollcb, quota) \ do { \ (netdev)->poll = (pollcb); \ (netdev)->weight = (quota);\ (napi)->dev = (netdev); \ } while 
(0) # else struct napi_struct { int dummy; }; # define compat_netif_napi_add(dev, napi, pollcb, quota) \ do { \ (dev)->poll = (pollcb); \ (dev)->weight = (quota);\ } while (0) # endif #endif #ifdef NETIF_F_TSO6 # define COMPAT_NETIF_F_TSO (NETIF_F_TSO6 | NETIF_F_TSO) #else # define COMPAT_NETIF_F_TSO (NETIF_F_TSO) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) # define compat_netif_tx_lock(dev) netif_tx_lock(dev) # define compat_netif_tx_unlock(dev) netif_tx_unlock(dev) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) # define compat_netif_tx_lock(dev) spin_lock(&dev->xmit_lock) # define compat_netif_tx_unlock(dev) spin_unlock(&dev->xmit_lock) #else /* Vendor backporting (SLES 10) has muddled the tx_lock situation. Pick whichever * of the above works for you. */ # define compat_netif_tx_lock(dev) do {} while (0) # define compat_netif_tx_unlock(dev) do {} while (0) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) # define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_N_VID # define compat_flush_scheduled_work(work) cancel_work_sync(work) #else # define COMPAT_VLAN_GROUP_ARRAY_LEN VLAN_GROUP_ARRAY_LEN # define compat_flush_scheduled_work(work) flush_scheduled_work() #endif /* * For kernel versions older than 2.6.29, where pci_msi_enabled is not * available, check if * 1. CONFIG_PCI_MSI is present * 2. kernel version is newer than 2.6.25 (because multiqueue is not * supporter) in kernels older than that) * 3. msi can be enabled. If it fails it means that MSI is not available. * When all the above are true, return non-zero so that multiple queues will be * allowed in the driver. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) # define compat_multiqueue_allowed(dev) pci_msi_enabled() #else # if defined CONFIG_PCI_MSI && LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 25) static inline int compat_multiqueue_allowed(struct pci_dev *dev) { int ret; if (!pci_enable_msi(dev)) ret = 1; else ret = 0; pci_disable_msi(dev); return ret; } # else # define compat_multiqueue_allowed(dev) (0) # endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 37) # define compat_vlan_get_protocol(skb) vlan_get_protocol(skb) #else # define compat_vlan_get_protocol(skb) (skb->protocol) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) typedef netdev_features_t compat_netdev_features_t; #else typedef u32 compat_netdev_features_t; #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 7, 0) || defined(VMW_NETIF_TRANS_UPDATE) #define compat_netif_trans_update(d) netif_trans_update(d) #else #define compat_netif_trans_update(d) do { (d)->trans_start = jiffies; } while (0) #endif #endif /* __COMPAT_NETDEVICE_H__ */ vmhgfs-only/shared/compat_dcache.h 0000444 0000000 0000000 00000004003 13432725347 016223 0 ustar root root /********************************************************* * Copyright (C) 2013 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_DCACHE_H__ # define __COMPAT_DCACHE_H__ #include <linux/dcache.h> /* * per-dentry locking was born in 2.5.62. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 62) #define compat_lock_dentry(dentry) spin_lock(&dentry->d_lock) #define compat_unlock_dentry(dentry) spin_unlock(&dentry->d_lock) #else #define compat_lock_dentry(dentry) do {} while (0) #define compat_unlock_dentry(dentry) do {} while (0) #endif /* * d_alloc_name was born in 2.6.10. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 10) #define compat_d_alloc_name(parent, s) d_alloc_name(parent, s) #else #define compat_d_alloc_name(parent, s) \ ({ \ struct qstr q; \ q.name = s; \ q.len = strlen(s); \ q.hash = full_name_hash(q.name, q.len); \ d_alloc(parent, &q); \ }) #endif #endif /* __COMPAT_DCACHE_H__ */ vmhgfs-only/shared/compat_pci_mapping.h 0000444 0000000 0000000 00000004741 13432725347 017313 0 ustar root root /********************************************************* * Copyright (C) 2008 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PCI_MAPPING_H__ #define __COMPAT_PCI_MAPPING_H__ #include <asm/types.h> #include <asm/io.h> #include <linux/pci.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2,3,41) typedef u32 dma_addr_t; static __inline__ int get_order(unsigned long size) { int order; size = (size - 1) >> (PAGE_SHIFT - 1); order = -1; do { size >>= 1; order++; } while (size); return order; } static inline void * compat_pci_alloc_consistent(struct pci_dev *hwdev, size_t size, dma_addr_t *dma_handle) { void *ptr = (void *)__get_free_pages(GFP_ATOMIC, get_order(size)); if (ptr) { memset(ptr, 0, size); *dma_handle = virt_to_phys(ptr); } return ptr; } static inline void compat_pci_free_consistent(struct pci_dev *hwdev, size_t size, void *vaddr, dma_addr_t dma_handle) { free_pages((unsigned long)vaddr, get_order(size)); } static inline dma_addr_t compat_pci_map_single(struct pci_dev *hwdev, void *ptr, size_t size, int direction) { return virt_to_phys(ptr); } static inline void compat_pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr, size_t size, int direction) { } #else #define compat_pci_alloc_consistent(hwdev, size, dma_handle) \ pci_alloc_consistent(hwdev, size, dma_handle) #define compat_pci_free_consistent(hwdev, size, vaddr, dma_handle) \ pci_free_consistent(hwdev, size, vaddr, dma_handle) #define compat_pci_map_single(hwdev, ptr, size, direction) \ pci_map_single(hwdev, ptr, size, direction) #define compat_pci_unmap_single(hwdev, dma_addr, size, direction) \ pci_unmap_single(hwdev, dma_addr, size, direction) #endif #endif vmhgfs-only/shared/vm_basic_asm_x86.h 0000444 0000000 0000000 00000042724 13432725350 016607 0 ustar root root /********************************************************* * Copyright (C) 
1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_asm_x86.h * * Basic IA32 asm macros */ #ifndef _VM_BASIC_ASM_X86_H_ #define _VM_BASIC_ASM_X86_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #if defined __cplusplus extern "C" { #endif #ifdef VM_X86_64 /* * The gcc inline asm uses the "A" constraint which differs in 32 & 64 * bit mode. 32 bit means eax and edx, 64 means rax or rdx. */ #error "x86-64 not supported" #endif /* * XTEST * Return TRUE if processor is in transaction region. * */ #if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS)) static INLINE Bool xtest(void) { uint8 al; __asm__ __volatile__(".byte 0x0f, 0x01, 0xd6 # xtest \n" "setnz %%al\n" : "=a"(al) : : "cc"); return al; } #endif /* __GNUC__ */ /* * FXSAVE/FXRSTOR * save/restore SIMD/MMX fpu state * * The pointer passed in must be 16-byte aligned. * * Intel and AMD processors behave differently w.r.t. fxsave/fxrstor. Intel * processors unconditionally save the exception pointer state (instruction * ptr., data ptr., and error instruction opcode). 
FXSAVE_ES1 and FXRSTOR_ES1 * work correctly for Intel processors. * * AMD processors only save the exception pointer state if ES=1. This leads to a * security hole whereby one process/VM can inspect the state of another process * VM. The AMD recommended workaround involves clobbering the exception pointer * state unconditionally, and this is implemented in FXRSTOR_AMD_ES0. Note that * FXSAVE_ES1 will only save the exception pointer state for AMD processors if * ES=1. * * The workaround (FXRSTOR_AMD_ES0) only costs 1 cycle more than just doing an * fxrstor, on both AMD Opteron and Intel Core CPUs. */ #if defined(__GNUC__) static INLINE void FXSAVE_ES1(void *save) { __asm__ __volatile__ ("fxsave %0\n" : "=m" (*(uint8 *)save) : : "memory"); } static INLINE void FXRSTOR_ES1(const void *load) { __asm__ __volatile__ ("fxrstor %0\n" : : "m" (*(const uint8 *)load) : "memory"); } static INLINE void FXRSTOR_AMD_ES0(const void *load) { uint64 dummy = 0; __asm__ __volatile__ ("fnstsw %%ax \n" // Grab x87 ES bit "bt $7,%%ax \n" // Test ES bit "jnc 1f \n" // Jump if ES=0 "fnclex \n" // ES=1. Clear it so fild doesn't trap "1: \n" "ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow "fildl %0 \n" // Dummy Load from "safe address" changes all // x87 exception pointers. "fxrstor %1 \n" : : "m" (dummy), "m" (*(const uint8 *)load) : "ax", "memory"); } #endif /* __GNUC__ */ /* * XSAVE/XRSTOR * save/restore GSSE/SIMD/MMX fpu state * * The pointer passed in must be 64-byte aligned. * See above comment for more information. 
*/ #if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS)) static INLINE void XSAVE_ES1(void *save, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x0f, 0xae, 0x21 \n" : : "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xsave %0 \n" : "=m" (*(uint8 *)save) : "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XSAVEOPT_ES1(void *save, uint64 mask) { __asm__ __volatile__ ( ".byte 0x0f, 0xae, 0x31 \n" : : "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); } static INLINE void XRSTOR_ES1(const void *load, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x0f, 0xae, 0x29 \n" : : "c" ((const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xrstor %0 \n" : : "m" (*(const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XRSTOR_AMD_ES0(const void *load, uint64 mask) { uint64 dummy = 0; __asm__ __volatile__ ("fnstsw %%ax \n" // Grab x87 ES bit "bt $7,%%ax \n" // Test ES bit "jnc 1f \n" // Jump if ES=0 "fnclex \n" // ES=1. Clear it so fild doesn't trap "1: \n" "ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow "fildl %0 \n" // Dummy Load from "safe address" changes all // x87 exception pointers. 
"mov %%ebx, %%eax \n" #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 ".byte 0x0f, 0xae, 0x29 \n" : : "m" (dummy), "c" ((const uint8 *)load), "b" ((uint32)mask), "d" ((uint32)(mask >> 32)) #else "xrstor %1 \n" : : "m" (dummy), "m" (*(const uint8 *)load), "b" ((uint32)mask), "d" ((uint32)(mask >> 32)) #endif : "eax", "memory"); } #endif /* __GNUC__ */ /* *----------------------------------------------------------------------------- * * Div643232 -- * * Unsigned integer division: * The dividend is 64-bit wide * The divisor is 32-bit wide * The quotient is 32-bit wide * * Use this function if you are certain that: * o Either the quotient will fit in 32 bits, * o Or your code is ready to handle a #DE exception indicating overflow. * If that is not the case, then use Div643264(). * * Results: * Quotient and remainder * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) static INLINE void Div643232(uint64 dividend, // IN uint32 divisor, // IN uint32 *quotient, // OUT uint32 *remainder) // OUT { __asm__( "divl %4" : "=a" (*quotient), "=d" (*remainder) : "0" ((uint32)dividend), "1" ((uint32)(dividend >> 32)), "rm" (divisor) : "cc" ); } #elif defined _MSC_VER static INLINE void Div643232(uint64 dividend, // IN uint32 divisor, // IN uint32 *quotient, // OUT uint32 *remainder) // OUT { __asm { mov eax, DWORD PTR [dividend] mov edx, DWORD PTR [dividend+4] div DWORD PTR [divisor] mov edi, DWORD PTR [quotient] mov [edi], eax mov edi, DWORD PTR [remainder] mov [edi], edx } } #else #error No compiler defined for Div643232 #endif #if defined(__GNUC__) /* *----------------------------------------------------------------------------- * * Div643264 -- * * Unsigned integer division: * The dividend is 64-bit wide * The divisor is 32-bit wide * The quotient is 64-bit wide * * Results: * Quotient and remainder * * Side effects: * None * 
*----------------------------------------------------------------------------- */ static INLINE void Div643264(uint64 dividend, // IN uint32 divisor, // IN uint64 *quotient, // OUT uint32 *remainder) // OUT { uint32 hQuotient; uint32 lQuotient; __asm__( "divl %5" "\n\t" "movl %%eax, %0" "\n\t" "movl %4, %%eax" "\n\t" "divl %5" : "=&rm" (hQuotient), "=a" (lQuotient), "=d" (*remainder) : "1" ((uint32)(dividend >> 32)), "g" ((uint32)dividend), "rm" (divisor), "2" (0) : "cc" ); *quotient = (uint64)hQuotient << 32 | lQuotient; } #endif /* *----------------------------------------------------------------------------- * * Mul64x3264 -- * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Unsigned 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Unsigned 64-bit integer product. * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) && \ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) && \ !defined(MUL64_NO_ASM) static INLINE uint64 Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift) { uint64 result; uint32 tmp1, tmp2; // ASSERT(shift >= 0 && shift < 64); __asm__("mov %%eax, %2\n\t" // Save lo(multiplicand) "mov %%edx, %%eax\n\t" // Get hi(multiplicand) "mull %4\n\t" // p2 = hi(multiplicand) * multiplier "xchg %%eax, %2\n\t" // Save lo(p2), get lo(multiplicand) "mov %%edx, %1\n\t" // Save hi(p2) "mull %4\n\t" // p1 = lo(multiplicand) * multiplier "addl %2, %%edx\n\t" // hi(p1) += lo(p2) "adcl $0, %1\n\t" // hi(p2) += carry from previous step "cmpl $32, %%ecx\n\t" // shift < 32? 
"jl 2f\n\t" // Go if so "shll $1, %%eax\n\t" // Save lo(p1) bit 31 in CF in case shift=32 "mov %%edx, %%eax\n\t" // result = hi(p2):hi(p1) >> (shift & 31) "mov %1, %%edx\n\t" "shrdl %%edx, %%eax\n\t" "mov $0, %2\n\t" "adcl $0, %2\n\t" // Get highest order bit shifted out, from CF "shrl %%cl, %%edx\n\t" "jmp 3f\n" "2:\n\t" "xor %2, %2\n\t" "shrdl %%edx, %%eax\n\t" // result = hi(p2):hi(p1):lo(p1) >> shift "adcl $0, %2\n\t" // Get highest order bit shifted out, from CF "shrdl %1, %%edx\n" "3:\n\t" "addl %2, %%eax\n\t" // result += highest order bit shifted out "adcl $0, %%edx" : "=A" (result), "=&r" (tmp1), "=&r" (tmp2) : "0" (multiplicand), "rm" (multiplier), "c" (shift) : "cc"); return result; } #elif defined _MSC_VER #pragma warning(disable: 4035) static INLINE uint64 Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift) { // ASSERT(shift >= 0 && shift < 64); __asm { mov eax, DWORD PTR [multiplicand+4] // Get hi(multiplicand) mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier mov ecx, eax // Save lo(p2) mov ebx, edx // Save hi(p2) mov eax, DWORD PTR [multiplicand] // Get lo(multiplicand) mul DWORD PTR [multiplier+0] // p1 = lo(multiplicand) * multiplier add edx, ecx // hi(p1) += lo(p2) adc ebx, 0 // hi(p2) += carry from previous step mov ecx, DWORD PTR [shift] // Get shift cmp ecx, 32 // shift < 32? 
jl SHORT l2 // Go if so shl eax, 1 // Save lo(p1) bit 31 in CF in case shift=32 mov eax, edx // result = hi(p2):hi(p1) >> (shift & 31) mov edx, ebx shrd eax, edx, cl mov esi, 0 adc esi, 0 // Get highest order bit shifted out, from CF shr edx, cl jmp SHORT l3 l2: xor esi, esi shrd eax, edx, cl // result = hi(p2):hi(p1):lo(p1) >> shift adc esi, 0 // Get highest order bit shifted out, from CF shrd edx, ebx, cl l3: add eax, esi // result += highest order bit shifted out adc edx, 0 } // return with result in edx:eax } #pragma warning(default: 4035) #else #define MUL64_NO_ASM 1 #include "mul64.h" #endif /* *----------------------------------------------------------------------------- * * Muls64x32s64 -- * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Signed 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Signed 64-bit integer product. 
* *----------------------------------------------------------------------------- */ #if defined(__GNUC__) && \ (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 4)) && \ !defined(MUL64_NO_ASM) static INLINE int64 Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift) { int64 result; uint32 tmp1, tmp2; // ASSERT(shift >= 0 && shift < 64); __asm__("mov %%eax, %2\n\t" // Save lo(multiplicand) "mov %%edx, %%eax\n\t" // Get hi(multiplicand) "test %%eax, %%eax\n\t" // Check sign of multiplicand "jl 0f\n\t" // Go if negative "mull %4\n\t" // p2 = hi(multiplicand) * multiplier "jmp 1f\n" "0:\n\t" "mull %4\n\t" // p2 = hi(multiplicand) * multiplier "sub %4, %%edx\n" // hi(p2) += -1 * multiplier "1:\n\t" "xchg %%eax, %2\n\t" // Save lo(p2), get lo(multiplicand) "mov %%edx, %1\n\t" // Save hi(p2) "mull %4\n\t" // p1 = lo(multiplicand) * multiplier "addl %2, %%edx\n\t" // hi(p1) += lo(p2) "adcl $0, %1\n\t" // hi(p2) += carry from previous step "cmpl $32, %%ecx\n\t" // shift < 32? "jl 2f\n\t" // Go if so "shll $1, %%eax\n\t" // Save lo(p1) bit 31 in CF in case shift=32 "mov %%edx, %%eax\n\t" // result = hi(p2):hi(p1) >> (shift & 31) "mov %1, %%edx\n\t" "shrdl %%edx, %%eax\n\t" "mov $0, %2\n\t" "adcl $0, %2\n\t" // Get highest order bit shifted out from CF "sarl %%cl, %%edx\n\t" "jmp 3f\n" "2:\n\t" "xor %2, %2\n\t" "shrdl %%edx, %%eax\n\t" // result = hi(p2):hi(p1):lo(p1) >> shift "adcl $0, %2\n\t" // Get highest order bit shifted out from CF "shrdl %1, %%edx\n" "3:\n\t" "addl %2, %%eax\n\t" // result += highest order bit shifted out "adcl $0, %%edx" : "=A" (result), "=&r" (tmp1), "=&rm" (tmp2) : "0" (multiplicand), "rm" (multiplier), "c" (shift) : "cc"); return result; } #elif defined(_MSC_VER) #pragma warning(disable: 4035) static INLINE int64 Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift) { //ASSERT(shift >= 0 && shift < 64); __asm { mov eax, DWORD PTR [multiplicand+4] // Get hi(multiplicand) test eax, eax // Check sign of multiplicand jl 
SHORT l0 // Go if negative mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier jmp SHORT l1 l0: mul DWORD PTR [multiplier] // p2 = hi(multiplicand) * multiplier sub edx, DWORD PTR [multiplier] // hi(p2) += -1 * multiplier l1: mov ecx, eax // Save lo(p2) mov ebx, edx // Save hi(p2) mov eax, DWORD PTR [multiplicand] // Get lo(multiplicand) mul DWORD PTR [multiplier] // p1 = lo(multiplicand) * multiplier add edx, ecx // hi(p1) += lo(p2) adc ebx, 0 // hi(p2) += carry from previous step mov ecx, DWORD PTR [shift] // Get shift cmp ecx, 32 // shift < 32? jl SHORT l2 // Go if so shl eax, 1 // Save lo(p1) bit 31 in CF in case shift=32 mov eax, edx // result = hi(p2):hi(p1) >> (shift & 31) mov edx, ebx shrd eax, edx, cl mov esi, 0 adc esi, 0 // Get highest order bit shifted out, from CF sar edx, cl jmp SHORT l3 l2: xor esi, esi shrd eax, edx, cl // result = hi(p2):hi(p1):lo(p1) << shift adc esi, 0 // Get highest order bit shifted out, from CF shrd edx, ebx, cl l3: add eax, esi // result += highest order bit shifted out adc edx, 0 } // return with result in edx:eax } #pragma warning(default: 4035) #endif #if defined __cplusplus } // extern "C" #endif #endif // _VM_BASIC_ASM_X86_H_ vmhgfs-only/shared/compat_module.h 0000444 0000000 0000000 00000005127 13432725347 016311 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * compat_module.h -- */ #ifndef __COMPAT_MODULE_H__ # define __COMPAT_MODULE_H__ #include <linux/module.h> /* * Modules wishing to use the GPL license are required to include a * MODULE_LICENSE definition in their module source as of 2.4.10. */ #ifndef MODULE_LICENSE #define MODULE_LICENSE(license) #endif /* * To make use of our own home-brewed MODULE_INFO, we need macros to * concatenate two expressions to "__mod_", and and to convert an * expression into a string. I'm sure we've got these in our codebase, * but I'd rather not introduce such a dependency in a compat header. */ #ifndef __module_cat #define __module_cat_1(a, b) __mod_ ## a ## b #define __module_cat(a, b) __module_cat_1(a, b) #endif #ifndef __stringify #define __stringify_1(x) #x #define __stringify(x) __stringify_1(x) #endif /* * MODULE_INFO was born in 2.5.69. */ #ifndef MODULE_INFO #define MODULE_INFO(tag, info) \ static const char __module_cat(tag, __LINE__)[] \ __attribute__((section(".modinfo"), unused)) = __stringify(tag) "=" info #endif /* * MODULE_VERSION was born in 2.6.4. The earlier form appends a long "\0xxx" * string to the module's version, but that was removed in 2.6.10, so we'll * ignore it in our wrapper. */ #ifndef MODULE_VERSION #define MODULE_VERSION(_version) MODULE_INFO(version, _version) #endif /* * Linux kernel < 2.6.31 takes 'int' for 'bool' module parameters. * Linux kernel >= 3.3.0 takes 'bool' for 'bool' module parameters. * Kernels between the two take either. So flip switch at 3.0.0. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) typedef bool compat_mod_param_bool; #else typedef int compat_mod_param_bool; #endif #endif /* __COMPAT_MODULE_H__ */ vmhgfs-only/shared/vm_basic_asm_x86_64.h 0000444 0000000 0000000 00000042355 13432725350 017120 0 ustar root root /********************************************************* * Copyright (C) 1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_asm_x86_64.h * * Basic x86_64 asm macros. */ #ifndef _VM_BASIC_ASM_X86_64_H_ #define _VM_BASIC_ASM_X86_64_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #ifndef VM_X86_64 #error "This file is x86-64 only!" 
#endif #if defined(_MSC_VER) && !defined(BORA_NO_WIN32_INTRINS) #ifdef __cplusplus extern "C" { #endif uint64 _umul128(uint64 multiplier, uint64 multiplicand, uint64 *highProduct); int64 _mul128(int64 multiplier, int64 multiplicand, int64 *highProduct); uint64 __shiftright128(uint64 lowPart, uint64 highPart, uint8 shift); #ifdef __cplusplus } #endif #pragma intrinsic(_umul128, _mul128, __shiftright128) #endif // _MSC_VER #if defined(__GNUC__) /* * GET_CURRENT_PC * * Returns the current program counter (i.e. instruction pointer i.e. rip * register on x86_64). In the example below: * * foo.c * L123: Foo(GET_CURRENT_PC()) * * the return value from GET_CURRENT_PC will point a debugger to L123. */ #define GET_CURRENT_PC() ({ \ void *__rip; \ asm("lea 0(%%rip), %0;\n\t" \ : "=r" (__rip)); \ __rip; \ }) /* * GET_CURRENT_LOCATION * * Updates the arguments with the values of the %rip, %rbp, and %rsp * registers at the current code location where the macro is invoked, * and the return address. */ #define GET_CURRENT_LOCATION(rip, rbp, rsp, retAddr) do { \ asm("lea 0(%%rip), %0\n" \ "mov %%rbp, %1\n" \ "mov %%rsp, %2\n" \ : "=r" (rip), "=r" (rbp), "=r" (rsp)); \ retAddr = (uint64) GetReturnAddress(); \ } while (0) #endif /* * FXSAVE/FXRSTOR * save/restore SIMD/MMX fpu state * * The pointer passed in must be 16-byte aligned. * * Intel and AMD processors behave differently w.r.t. fxsave/fxrstor. Intel * processors unconditionally save the exception pointer state (instruction * ptr., data ptr., and error instruction opcode). FXSAVE_ES1 and FXRSTOR_ES1 * work correctly for Intel processors. * * AMD processors only save the exception pointer state if ES=1. This leads to a * security hole whereby one process/VM can inspect the state of another process * VM. The AMD recommended workaround involves clobbering the exception pointer * state unconditionally, and this is implemented in FXRSTOR_AMD_ES0. 
Note that * FXSAVE_ES1 will only save the exception pointer state for AMD processors if * ES=1. * * The workaround (FXRSTOR_AMD_ES0) only costs 1 cycle more than just doing an * fxrstor, on both AMD Opteron and Intel Core CPUs. */ #if defined(__GNUC__) static INLINE void FXSAVE_ES1(void *save) { __asm__ __volatile__ ("fxsaveq %0 \n" : "=m" (*(uint8 *)save) : : "memory"); } static INLINE void FXSAVE_COMPAT_ES1(void *save) { __asm__ __volatile__ ("fxsave %0 \n" : "=m" (*(uint8 *)save) : : "memory"); } static INLINE void FXRSTOR_ES1(const void *load) { __asm__ __volatile__ ("fxrstorq %0 \n" : : "m" (*(const uint8 *)load) : "memory"); } static INLINE void FXRSTOR_COMPAT_ES1(const void *load) { __asm__ __volatile__ ("fxrstor %0 \n" : : "m" (*(const uint8 *)load) : "memory"); } static INLINE void FXRSTOR_AMD_ES0(const void *load) { uint64 dummy = 0; __asm__ __volatile__ ("fnstsw %%ax \n" // Grab x87 ES bit "bt $7,%%ax \n" // Test ES bit "jnc 1f \n" // Jump if ES=0 "fnclex \n" // ES=1. Clear it so fild doesn't trap "1: \n" "ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow "fildl %0 \n" // Dummy Load from "safe address" changes all // x87 exception pointers. "fxrstorq %1 \n" : : "m" (dummy), "m" (*(const uint8 *)load) : "ax", "memory"); } #endif /* __GNUC__ */ /* * XSAVE/XRSTOR * save/restore GSSE/SIMD/MMX fpu state * * The pointer passed in must be 64-byte aligned. * See above comment for more information. 
*/ #if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS)) static INLINE void XSAVE_ES1(void *save, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x48, 0x0f, 0xae, 0x21 \n" : : "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xsaveq %0 \n" : "=m" (*(uint8 *)save) : "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XSAVE_COMPAT_ES1(void *save, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x0f, 0xae, 0x21 \n" : : "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xsave %0 \n" : "=m" (*(uint8 *)save) : "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XSAVEOPT_ES1(void *save, uint64 mask) { __asm__ __volatile__ ( ".byte 0x48, 0x0f, 0xae, 0x31 \n" : : "c" ((uint8 *)save), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); } static INLINE void XRSTOR_ES1(const void *load, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x48, 0x0f, 0xae, 0x29 \n" : : "c" ((const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xrstorq %0 \n" : : "m" (*(const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XRSTOR_COMPAT_ES1(const void *load, uint64 mask) { #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 __asm__ __volatile__ ( ".byte 0x0f, 0xae, 0x29 \n" : : "c" ((const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #else __asm__ __volatile__ ( "xrstor %0 \n" : : "m" (*(const uint8 *)load), "a" ((uint32)mask), "d" ((uint32)(mask >> 32)) : "memory"); #endif } static INLINE void XRSTOR_AMD_ES0(const void *load, uint64 mask) { uint64 dummy = 0; __asm__ 
__volatile__ ("fnstsw %%ax \n" // Grab x87 ES bit "bt $7,%%ax \n" // Test ES bit "jnc 1f \n" // Jump if ES=0 "fnclex \n" // ES=1. Clear it so fild doesn't trap "1: \n" "ffree %%st(7) \n" // Clear tag bit - avoid poss. stack overflow "fildl %0 \n" // Dummy Load from "safe address" changes all // x87 exception pointers. "mov %%ebx, %%eax \n" #if __GNUC__ < 4 || __GNUC__ == 4 && __GNUC_MINOR__ == 1 ".byte 0x48, 0x0f, 0xae, 0x29 \n" : : "m" (dummy), "c" ((const uint8 *)load), "b" ((uint32)mask), "d" ((uint32)(mask >> 32)) #else "xrstorq %1 \n" : : "m" (dummy), "m" (*(const uint8 *)load), "b" ((uint32)mask), "d" ((uint32)(mask >> 32)) #endif : "eax", "memory"); } #endif /* __GNUC__ */ /* * XTEST * Return TRUE if processor is in transaction region. * */ #if defined(__GNUC__) && (defined(VMM) || defined(VMKERNEL) || defined(FROBOS)) static INLINE Bool xtest(void) { uint8 al; __asm__ __volatile__(".byte 0x0f, 0x01, 0xd6 # xtest \n" "setnz %%al\n" : "=a"(al) : : "cc"); return al; } #endif /* __GNUC__ */ /* *----------------------------------------------------------------------------- * * Mul64x6464 -- * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Unsigned 64-bit integer multiplicand. * Unsigned 64-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Unsigned 64-bit integer product. * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) && !defined(MUL64_NO_ASM) static INLINE uint64 Mul64x6464(uint64 multiplicand, uint64 multiplier, uint32 shift) { /* * Implementation: * Multiply 64x64 bits to yield a full 128-bit product. * Clear the carry bit (needed for the shift == 0 case). * Shift result in RDX:RAX right by "shift". * Add the carry bit. (If shift > 0, this is the highest order bit * that was discarded by the shift; else it is 0.) * Return the low-order 64 bits of the above. 
* */ uint64 result, dummy; __asm__("mulq %3 \n\t" "clc \n\t" "shrdq %b4, %1, %0 \n\t" "adc $0, %0 \n\t" : "=a" (result), "=d" (dummy) : "0" (multiplier), "rm" (multiplicand), "c" (shift) : "cc"); return result; } #elif defined(_MSC_VER) && !defined(MUL64_NO_ASM) static INLINE uint64 Mul64x6464(uint64 multiplicand, uint64 multiplier, uint32 shift) { /* * Unfortunately, MSVC intrinsics don't give us access to the carry * flag after a 128-bit shift, so the implementation is more * awkward: * Multiply 64x64 bits to yield a full 128-bit product. * Shift result right by "shift". * If shift != 0, extract and add in highest order bit that was * discarded by the shift. * Return the low-order 64 bits of the above. */ uint64 tmplo, tmphi; tmplo = _umul128(multiplicand, multiplier, &tmphi); if (shift == 0) { return tmplo; } else { return __shiftright128(tmplo, tmphi, (uint8) shift) + ((tmplo >> (shift - 1)) & 1); } } #else #define MUL64_NO_ASM 1 #include "mul64.h" #endif /* *----------------------------------------------------------------------------- * * Muls64x64s64 -- * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Signed 64-bit integer multiplicand. * Unsigned 64-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Signed 64-bit integer product. * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) && !defined(MUL64_NO_ASM) static inline int64 Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift) { int64 result, dummy; /* Implementation: * Multiply 64x64 bits to yield a full 128-bit product. * Clear the carry bit (needed for the shift == 0 case). * Shift result in RDX:RAX right by "shift". * Add the carry bit. (If shift > 0, this is the highest order bit * that was discarded by the shift; else it is 0.) * Return the low-order 64 bits of the above. 
* * Note: using the unsigned shrd instruction is correct because * shift < 64 and we return only the low 64 bits of the shifted * result. */ __asm__("imulq %3 \n\t" "clc \n\t" "shrdq %b4, %1, %0 \n\t" "adc $0, %0 \n\t" : "=a" (result), "=d" (dummy) : "0" (multiplier), "rm" (multiplicand), "c" (shift) : "cc"); return result; } #elif defined(_MSC_VER) && !defined(MUL64_NO_ASM) static INLINE int64 Muls64x64s64(int64 multiplicand, int64 multiplier, uint32 shift) { /* * Unfortunately, MSVC intrinsics don't give us access to the carry * flag after a 128-bit shift, so the implementation is more * awkward: * Multiply 64x64 bits to yield a full 128-bit product. * Shift result right by "shift". * If shift != 0, extract and add in highest order bit that was * discarded by the shift. * Return the low-order 64 bits of the above. * * Note: using an unsigned shift is correct because shift < 64 and * we return only the low 64 bits of the shifted result. */ int64 tmplo, tmphi; tmplo = _mul128(multiplicand, multiplier, &tmphi); if (shift == 0) { return tmplo; } else { return __shiftright128(tmplo, tmphi, (uint8) shift) + ((tmplo >> (shift - 1)) & 1); } } #endif #ifndef MUL64_NO_ASM /* *----------------------------------------------------------------------------- * * Mul64x3264 -- * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Unsigned 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Unsigned 64-bit integer product. 
* *----------------------------------------------------------------------------- */ static INLINE uint64 Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift) { return Mul64x6464(multiplicand, multiplier, shift); } /* *----------------------------------------------------------------------------- * * Muls64x32s64 -- * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Signed 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Signed 64-bit integer product. * *----------------------------------------------------------------------------- */ static INLINE int64 Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift) { return Muls64x64s64(multiplicand, multiplier, shift); } #endif #if defined(__GNUC__) static INLINE void * uint64set(void *dst, uint64 val, uint64 count) { int dummy0; int dummy1; __asm__ __volatile__("\t" "cld" "\n\t" "rep ; stosq" "\n" : "=c" (dummy0), "=D" (dummy1) : "0" (count), "1" (dst), "a" (val) : "memory", "cc"); return dst; } #endif /* *----------------------------------------------------------------------------- * * Div643232 -- * * Unsigned integer division: * The dividend is 64-bit wide * The divisor is 32-bit wide * The quotient is 32-bit wide * * Use this function if you are certain that the quotient will fit in 32 bits, * If that is not the case, a #DE exception was generated in 32-bit version, * but not in this 64-bit version. So please be careful. 
* * Results: * Quotient and remainder * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) || defined(_MSC_VER) static INLINE void Div643232(uint64 dividend, // IN uint32 divisor, // IN uint32 *quotient, // OUT uint32 *remainder) // OUT { *quotient = (uint32)(dividend / divisor); *remainder = (uint32)(dividend % divisor); } #endif /* *----------------------------------------------------------------------------- * * Div643264 -- * * Unsigned integer division: * The dividend is 64-bit wide * The divisor is 32-bit wide * The quotient is 64-bit wide * * Results: * Quotient and remainder * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined(__GNUC__) static INLINE void Div643264(uint64 dividend, // IN uint32 divisor, // IN uint64 *quotient, // OUT uint32 *remainder) // OUT { *quotient = dividend / divisor; *remainder = dividend % divisor; } #endif #endif // _VM_BASIC_ASM_X86_64_H_ vmhgfs-only/shared/compat_timer.h 0000444 0000000 0000000 00000006551 13432725347 016146 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_TIMER_H__ # define __COMPAT_TIMER_H__ /* * The del_timer_sync() API appeared in 2.3.43 * It became reliable in 2.4.0-test3 * * --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) # define compat_del_timer_sync(timer) del_timer_sync(timer) #else # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43) /* 2.3.43 removed asm/softirq.h's reference to bh_base. */ # include <linux/interrupt.h> # endif # include <asm/softirq.h> static inline int compat_del_timer_sync(struct timer_list *timer) // IN { int wasPending; start_bh_atomic(); wasPending = del_timer(timer); end_bh_atomic(); return wasPending; } #endif /* * The msleep_interruptible() API appeared in 2.6.9. * It is based on the msleep() API, which appeared in 2.4.29. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 9) # include <linux/delay.h> # define compat_msleep_interruptible(msecs) msleep_interruptible(msecs) # define compat_msleep(msecs) msleep(msecs) #else # include <linux/sched.h> /* * msecs_to_jiffies appeared in 2.6.7. For earlier kernels, * fall back to slow-case code (we don't use this operation * enough to need the performance). */ # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 7) # define msecs_to_jiffies(msecs) (((msecs) * HZ + 999) / 1000) # endif /* * set_current_state appeared in 2.2.18. 
*/ # if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) # define set_current_state(a) do { current->state = (a); } while(0) # endif static inline void compat_msleep_interruptible(unsigned long msecs) // IN { set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(msecs) + 1); } static inline void compat_msleep(unsigned long msecs) // IN { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(msecs) + 1); } #endif /* * There is init_timer_deferrable() since 2.6.22. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) # define compat_init_timer_deferrable(timer) init_timer_deferrable(timer) #else # define compat_init_timer_deferrable(timer) init_timer(timer) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) static inline void compat_setup_timer(struct timer_list * timer, void (*function)(unsigned long), unsigned long data) { timer->function = function; timer->data = data; init_timer(timer); } #else # define compat_setup_timer(timer, function, data) \ setup_timer(timer, function, data) #endif #endif /* __COMPAT_TIMER_H__ */ vmhgfs-only/shared/kernelStubsLinux.c 0000444 0000000 0000000 00000024601 13432725330 016763 0 ustar root root /********************************************************* * Copyright (C) 2006-2014 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * kernelStubsLinux.c * * This file contains implementations of common userspace functions in terms * that the Linux kernel can understand. */ /* Must come before any kernel header file */ #include "driver-config.h" #include "kernelStubs.h" #include "compat_kernel.h" #include "compat_page.h" #include "compat_sched.h" #include <linux/slab.h> #include "vm_assert.h" /* *----------------------------------------------------------------------------- * * Panic -- * * Prints the debug message and stops the system. * * Results: * None. * * Side effects: * None * *----------------------------------------------------------------------------- */ void Panic(const char *fmt, ...) // IN { va_list args; char *result; va_start(args, fmt); result = Str_Vasprintf(NULL, fmt, args); va_end(args); if (result) { printk(KERN_EMERG "%s", result); } BUG(); while (1); // Avoid compiler warning. } /* *---------------------------------------------------------------------- * * Str_Strcpy-- * * Wrapper for strcpy that checks for buffer overruns. * * Results: * Same as strcpy. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ char * Str_Strcpy(char *buf, // OUT const char *src, // IN size_t maxSize) // IN { size_t len; len = strlen(src); if (len >= maxSize) { #ifdef GetReturnAddress Panic("%s:%d Buffer too small 0x%p\n", __FILE__, __LINE__, GetReturnAddress()); #else Panic("%s:%d Buffer too small\n", __FILE__, __LINE__); #endif } return memcpy(buf, src, len + 1); } /* *---------------------------------------------------------------------- * * Str_Vsnprintf -- * * Compatability wrapper b/w different libc versions * * Results: * int - number of bytes written (not including NULL terminate character), * -1 on overflow (insufficient space for NULL terminate is considered * overflow) * * NB: on overflow the buffer WILL be null terminated * * Side effects: * None * *---------------------------------------------------------------------- */ int Str_Vsnprintf(char *str, // OUT size_t size, // IN const char *format, // IN va_list arguments) // IN { int retval; retval = vsnprintf(str, size, format, arguments); /* * Linux glibc 2.0.x returns -1 and null terminates (which we shouldn't * be linking against), but glibc 2.1.x follows c99 and returns * characters that would have been written. */ if (retval >= size) { return -1; } return retval; } /* *----------------------------------------------------------------------------- * * Str_Vasprintf -- * * Allocate and format a string, using the GNU libc way to specify the * format (i.e. optionally allow the use of positional parameters) * * Results: * The allocated string on success (if 'length' is not NULL, *length * is set to the length of the allocated string) * NULL on failure * * Side effects: * None * *----------------------------------------------------------------------------- */ char * Str_Vasprintf(size_t *length, // OUT const char *format, // IN va_list arguments) // IN { /* * Simple implementation of Str_Vasprintf when userlevel libraries are not * available (e.g. 
for use in drivers). We just fallback to vsnprintf, * doubling if we didn't have enough space. */ unsigned int bufSize; char *buf; int retval; bufSize = strlen(format); buf = NULL; do { /* * Initial allocation of strlen(format) * 2. Should this be tunable? * XXX Yes, this could overflow and spin forever when you get near 2GB * allocations. I don't care. --rrdharan */ va_list args2; bufSize *= 2; buf = realloc(buf, bufSize); if (!buf) { return NULL; } va_copy(args2, arguments); retval = Str_Vsnprintf(buf, bufSize, format, args2); va_end(args2); } while (retval == -1); if (length) { *length = retval; } /* * Try to trim the buffer here to save memory? */ return buf; } /* *----------------------------------------------------------------------------- * * Str_Asprintf -- * * Same as Str_Vasprintf(), but parameters are passed inline --hpreg * * Results: * Same as Str_Vasprintf() * * Side effects: * Same as Str_Vasprintf() * *----------------------------------------------------------------------------- */ char * Str_Asprintf(size_t *length, // OUT const char *format, // IN ...) // IN { va_list arguments; char *result; va_start(arguments, format); result = Str_Vasprintf(length, format, arguments); va_end(arguments); return result; } /* *----------------------------------------------------------------------------- * * strdup -- * * Duplicates a string. * * Results: * A pointer to memory containing the duplicated string or NULL if no * memory was available. * * Side effects: * None * *----------------------------------------------------------------------------- */ char * strdup(const char *source) // IN { char *target = NULL; if (source) { /* * We call our special implementation of malloc() because the users of * strdup() will call free(), and that'll decrement the pointer before * freeing it. Thus, we need to make sure that the allocated block * also stores the block length before the block itself (see malloc() * below). 
*/ unsigned int len = strlen(source); target = malloc(len + 1); if (target) { memcpy(target, source, len + 1); } } return target; } /* *---------------------------------------------------------------------------- * * mallocReal -- * * Allocate memory using kmalloc. There is no realloc * equivalent, so we roll our own by padding each allocation with * 4 (or 8 for 64 bit guests) extra bytes to store the block length. * * Results: * Pointer to driver heap memory, offset by 4 (or 8) * bytes from the real block pointer. * * Side effects: * None. * *---------------------------------------------------------------------------- */ static void * mallocReal(size_t size) // IN { size_t *ptr; ptr = kmalloc(size + sizeof size, GFP_KERNEL); if (ptr) { *ptr++ = size; } return ptr; } /* *---------------------------------------------------------------------------- * * malloc -- * * Allocate memory using the common mallocReal. * * Note: This calls mallocReal and not malloc as the gcc 5.1.1 optimizer * will replace the malloc and memset with a calloc call. This results * in calloc calling itself and results in system crashes. See bug 1413226. * * Results: * Pointer to driver heap memory, offset by 4 (or 8) * bytes from the real block pointer. * * Side effects: * None. * *---------------------------------------------------------------------------- */ void * malloc(size_t size) // IN { return mallocReal(size); } /* *--------------------------------------------------------------------------- * * free -- * * Free memory allocated by a previous call to malloc, calloc or realloc. * * Results: * None. * * Side effects: * Calls kfree to free the real (base) pointer. * *--------------------------------------------------------------------------- */ void free(void *mem) // IN { if (mem) { size_t *dataPtr = (size_t *)mem; kfree(--dataPtr); } } /* *---------------------------------------------------------------------------- * * calloc -- * * Malloc and zero. 
* * Note: This calls mallocReal and not malloc as the gcc 5.1.1 optimizer * will replace the malloc and memset with a calloc call. This results * for system crashes when used by kernel components. See bug 1413226. * * Results: * Pointer to driver heap memory (see malloc, above). * * Side effects: * None. * *---------------------------------------------------------------------------- */ void * calloc(size_t num, // IN size_t len) // IN { size_t size; void *ptr; size = num * len; ptr = mallocReal(size); if (ptr) { memset(ptr, 0, size); } return ptr; } /* *---------------------------------------------------------------------------- * * realloc -- * * Since the driver heap has no realloc equivalent, we have to roll our * own. Fortunately, we can retrieve the block size of every block we * hand out since we stashed it at allocation time (see malloc above). * * Results: * Pointer to memory block valid for 'newSize' bytes, or NULL if * allocation failed. * * Side effects: * Could copy memory around. * *---------------------------------------------------------------------------- */ void * realloc(void* ptr, // IN size_t newSize) // IN { void *newPtr; size_t *dataPtr; size_t length, lenUsed; dataPtr = (size_t *)ptr; length = ptr ? dataPtr[-1] : 0; if (newSize == 0) { if (ptr) { free(ptr); newPtr = NULL; } else { newPtr = malloc(newSize); } } else if (newSize == length) { newPtr = ptr; } else if ((newPtr = malloc(newSize))) { if (length < newSize) { lenUsed = length; } else { lenUsed = newSize; } memcpy(newPtr, ptr, lenUsed); free(ptr); } return newPtr; } vmhgfs-only/shared/vmciKernelAPI1.h 0000444 0000000 0000000 00000020110 13432725350 016152 0 ustar root root /********************************************************* * Copyright (C) 2010,2018 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciKernelAPI1.h -- * * Kernel API (v1) exported from the VMCI host and guest drivers. */ #ifndef __VMCI_KERNELAPI_1_H__ #define __VMCI_KERNELAPI_1_H__ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmci_defs.h" #include "vmci_call_defs.h" #if defined __cplusplus extern "C" { #endif /* VMCI module namespace on vmkernel. */ #define MOD_VMCI_NAMESPACE "com.vmware.vmci" /* Define version 1. */ #undef VMCI_KERNEL_API_VERSION #define VMCI_KERNEL_API_VERSION_1 1 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_1 /* Macros to operate on the driver version number. */ #define VMCI_MAJOR_VERSION(v) (((v) >> 16) & 0xffff) #define VMCI_MINOR_VERSION(v) ((v) & 0xffff) #if defined(_WIN32) /* Path to callback object in object manager, for Windows only. */ #define VMCI_CALLBACK_OBJECT_PATH L"\\Callback\\VMCIDetachCB" #endif // _WIN32 /* VMCI Device Usage API. 
*/ #if defined(__linux__) && !defined(VMKERNEL) #define vmci_device_get(_a, _b, _c, _d) 1 #define vmci_device_release(_x) #else // !linux typedef void (VMCI_DeviceShutdownFn)(void *deviceRegistration, void *userData); Bool vmci_device_get(uint32 *apiVersion, VMCI_DeviceShutdownFn *deviceShutdownCB, void *userData, void **deviceRegistration); void vmci_device_release(void *deviceRegistration); #endif // !linux #if defined(_WIN32) /* Called when the client is unloading, for Windows only. */ void vmci_exit(void); #endif // _WIN32 /* VMCI Datagram API. */ int vmci_datagram_create_handle(uint32 resourceId, uint32 flags, VMCIDatagramRecvCB recvCB, void *clientData, VMCIHandle *outHandle); int vmci_datagram_create_handle_priv(uint32 resourceID, uint32 flags, VMCIPrivilegeFlags privFlags, VMCIDatagramRecvCB recvCB, void *clientData, VMCIHandle *outHandle); int vmci_datagram_destroy_handle(VMCIHandle handle); int vmci_datagram_send(VMCIDatagram *msg); /* VMCI Utility API. */ VMCIId vmci_get_context_id(void); #if defined(__linux__) && !defined(VMKERNEL) /* Returned value is a bool, 0 for false, 1 for true. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) int vmci_is_context_owner(VMCIId contextID, kuid_t uid); #else int vmci_is_context_owner(VMCIId contextID, uid_t uid); #endif #else // !linux || VMKERNEL /* Returned value is a VMCI error code. */ int vmci_is_context_owner(VMCIId contextID, void *hostUser); #endif // !linux || VMKERNEL uint32 vmci_version(void); int vmci_cid_2_host_vm_id(VMCIId contextID, void *hostVmID, size_t hostVmIDLen); /* VMCI Event API. 
*/ typedef void (*VMCI_EventCB)(VMCIId subID, VMCI_EventData *ed, void *clientData); int vmci_event_subscribe(VMCI_Event event, #if !defined(__linux__) && !defined(__FreeBSD__) || defined(VMKERNEL) uint32 flags, #endif // !linux && !FreeBSD || VMKERNEL VMCI_EventCB callback, void *callbackData, VMCIId *subID); int vmci_event_unsubscribe(VMCIId subID); /* VMCI Context API */ VMCIPrivilegeFlags vmci_context_get_priv_flags(VMCIId contextID); /* VMCI Queue Pair API. */ typedef struct VMCIQPair VMCIQPair; int vmci_qpair_alloc(VMCIQPair **qpair, VMCIHandle *handle, uint64 produceQSize, uint64 consumeQSize, VMCIId peer, uint32 flags, VMCIPrivilegeFlags privFlags); int vmci_qpair_detach(VMCIQPair **qpair); int vmci_qpair_get_produce_indexes(const VMCIQPair *qpair, uint64 *producerTail, uint64 *consumerHead); int vmci_qpair_get_consume_indexes(const VMCIQPair *qpair, uint64 *consumerTail, uint64 *producerHead); int64 vmci_qpair_produce_free_space(const VMCIQPair *qpair); int64 vmci_qpair_produce_buf_ready(const VMCIQPair *qpair); int64 vmci_qpair_consume_free_space(const VMCIQPair *qpair); int64 vmci_qpair_consume_buf_ready(const VMCIQPair *qpair); ssize_t vmci_qpair_enqueue(VMCIQPair *qpair, const void *buf, size_t bufSize, int mode); ssize_t vmci_qpair_dequeue(VMCIQPair *qpair, void *buf, size_t bufSize, int mode); ssize_t vmci_qpair_peek(VMCIQPair *qpair, void *buf, size_t bufSize, int mode); #if (defined(__APPLE__) && !defined (VMX86_TOOLS)) || \ (defined(__linux__) && defined(__KERNEL__)) || \ (defined(_WIN32) && defined(WINNT_DDK)) /* * Environments that support struct iovec */ ssize_t vmci_qpair_enquev(VMCIQPair *qpair, void *iov, size_t iovSize, int mode); ssize_t vmci_qpair_dequev(VMCIQPair *qpair, void *iov, size_t iovSize, int mode); ssize_t vmci_qpair_peekv(VMCIQPair *qpair, void *iov, size_t iovSize, int mode); #endif /* Systems that support struct iovec */ /* Typedefs for all of the above, used by the IOCTLs and the kernel library. 
*/ typedef void (VMCI_DeviceReleaseFct)(void *); typedef int (VMCIDatagram_CreateHndFct)(VMCIId, uint32, VMCIDatagramRecvCB, void *, VMCIHandle *); typedef int (VMCIDatagram_CreateHndPrivFct)(VMCIId, uint32, VMCIPrivilegeFlags, VMCIDatagramRecvCB, void *, VMCIHandle *); typedef int (VMCIDatagram_DestroyHndFct)(VMCIHandle); typedef int (VMCIDatagram_SendFct)(VMCIDatagram *); typedef VMCIId (VMCI_GetContextIDFct)(void); typedef uint32 (VMCI_VersionFct)(void); typedef int (VMCI_ContextID2HostVmIDFct)(VMCIId, void *, size_t); typedef int (VMCI_IsContextOwnerFct)(VMCIId, void *); typedef int (VMCIEvent_SubscribeFct)(VMCI_Event, uint32, VMCI_EventCB, void *, VMCIId *); typedef int (VMCIEvent_UnsubscribeFct)(VMCIId); typedef VMCIPrivilegeFlags (VMCIContext_GetPrivFlagsFct)(VMCIId); typedef int (VMCIQPair_AllocFct)(VMCIQPair **, VMCIHandle *, uint64, uint64, VMCIId, uint32, VMCIPrivilegeFlags); typedef int (VMCIQPair_DetachFct)(VMCIQPair **); typedef int (VMCIQPair_GetProduceIndexesFct)(const VMCIQPair *, uint64 *, uint64 *); typedef int (VMCIQPair_GetConsumeIndexesFct)(const VMCIQPair *, uint64 *, uint64 *); typedef int64 (VMCIQPair_ProduceFreeSpaceFct)(const VMCIQPair *); typedef int64 (VMCIQPair_ProduceBufReadyFct)(const VMCIQPair *); typedef int64 (VMCIQPair_ConsumeFreeSpaceFct)(const VMCIQPair *); typedef int64 (VMCIQPair_ConsumeBufReadyFct)(const VMCIQPair *); typedef ssize_t (VMCIQPair_EnqueueFct)(VMCIQPair *, const void *, size_t, int); typedef ssize_t (VMCIQPair_DequeueFct)(VMCIQPair *, void *, size_t, int); typedef ssize_t (VMCIQPair_PeekFct)(VMCIQPair *, void *, size_t, int); typedef ssize_t (VMCIQPair_EnqueueVFct)(VMCIQPair *qpair, void *, size_t, int); typedef ssize_t (VMCIQPair_DequeueVFct)(VMCIQPair *qpair, void *, size_t, int); typedef ssize_t (VMCIQPair_PeekVFct)(VMCIQPair *qpair, void *, size_t, int); #if defined __cplusplus } // extern "C" #endif #endif /* !__VMCI_KERNELAPI_1_H__ */ vmhgfs-only/shared/vmci_infrastructure.h 0000444 0000000 0000000 
00000010362 13432725350 017546 0 ustar root root /********************************************************* * Copyright (C) 2006,2014 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_infrastructure.h -- * * This file implements the VMCI infrastructure. */ #ifndef _VMCI_INFRASTRUCTURE_H_ #define _VMCI_INFRASTRUCTURE_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vmware.h" #include "vmci_defs.h" #if defined __cplusplus extern "C" { #endif typedef enum { VMCIOBJ_VMX_VM = 10, VMCIOBJ_CONTEXT, VMCIOBJ_SOCKET, VMCIOBJ_NOT_SET, } VMCIObjType; /* For storing VMCI structures in file handles. */ typedef struct VMCIObj { void *ptr; VMCIObjType type; } VMCIObj; /* Guestcalls currently support a maximum of 8 uint64 arguments. */ #define VMCI_GUESTCALL_MAX_ARGS_SIZE 64 /* * Structure used for checkpointing the doorbell mappings. It is * written to the checkpoint as is, so changing this structure will * break checkpoint compatibility. */ typedef struct VMCIDoorbellCptState { VMCIHandle handle; uint64 bitmapIdx; } VMCIDoorbellCptState; /* Used to determine what checkpoint state to get and set. 
*/ #define VMCI_NOTIFICATION_CPT_STATE 0x1 #define VMCI_WELLKNOWN_CPT_STATE 0x2 #define VMCI_DG_OUT_STATE 0x3 #define VMCI_DG_IN_STATE 0x4 #define VMCI_DG_IN_SIZE_STATE 0x5 #define VMCI_DOORBELL_CPT_STATE 0x6 #define VMCI_DG_HYPERVISOR_SAVE_STATE_SIZE 0x7 #define VMCI_DG_HYPERVISOR_SAVE_STATE 0x8 /* Used to control the VMCI device in the vmkernel */ #define VMCI_DEV_RESET 0x01 #define VMCI_DEV_QP_RESET 0x02 // DEPRECATED #define VMCI_DEV_QUIESCE 0x03 #define VMCI_DEV_UNQUIESCE 0x04 #define VMCI_DEV_QP_BREAK_SHARING 0x05 // DEPRECATED #define VMCI_DEV_RESTORE_SYNC 0x06 #define VMCI_DEV_BMASTER_OFF 0x07 #define VMCI_DEV_BMASTER_ON 0x08 /* *------------------------------------------------------------------------- * * VMCI_Hash -- * * Hash function used by the Simple Datagram API. Based on the djb2 * hash function by Dan Bernstein. * * Result: * Returns guest call size. * * Side effects: * None. * *------------------------------------------------------------------------- */ static INLINE int VMCI_Hash(VMCIHandle handle, // IN unsigned size) // IN { unsigned i; int hash = 5381; const uint64 handleValue = QWORD(handle.resource, handle.context); for (i = 0; i < sizeof handle; i++) { hash = ((hash << 5) + hash) + (uint8)(handleValue >> (i * 8)); } return hash & (size - 1); } /* *------------------------------------------------------------------------- * * VMCI_HashId -- * * Hash function used by the Simple Datagram API. Hashes only a VMCI id * (not the full VMCI handle) Based on the djb2 * hash function by Dan Bernstein. * * Result: * Returns guest call size. * * Side effects: * None. 
* *------------------------------------------------------------------------- */ static INLINE int VMCI_HashId(VMCIId id, // IN unsigned size) // IN { unsigned i; int hash = 5381; for (i = 0; i < sizeof id; i++) { hash = ((hash << 5) + hash) + (uint8)(id >> (i * 8)); } return hash & (size - 1); } #if defined __cplusplus } // extern "C" #endif #endif // _VMCI_INFRASTRUCTURE_H_ vmhgfs-only/shared/autoconf/ 0000755 0000000 0000000 00000000000 13432726375 015125 5 ustar root root vmhgfs-only/shared/autoconf/file_operations_fsync.c 0000444 0000000 0000000 00000002746 13432725347 021662 0 ustar root root /********************************************************* * Copyright (C) 2011 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Linux v3.1 added 2 params to fsync for fine-grained locking control. * But SLES11 SP2 has backported the change to its 3.0 kernel, * so we can't rely solely on kernel version to determine number of * arguments. */ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 0, 0) # error This compile test intentionally fails. 
#else #include <linux/fs.h> #include <linux/types.h> /* loff_t */ static int TestFsync(struct file *file, loff_t start, loff_t end, int datasync) { return 0; } struct file_operations testFO = { .fsync = TestFsync, }; #endif vmhgfs-only/shared/autoconf/getsb1.c 0000444 0000000 0000000 00000003076 13432725347 016460 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) #include <linux/fs.h> /* * Around 2.6.18, a pointer to a vfsmount was added to get_sb. Red Hat * backported this behavior into a 2.6.17 kernel. * * This test will fail on a kernel with such a patch. */ static struct super_block * LinuxDriverGetSb(struct file_system_type *fs_type, int flags, const char *dev_name, void *rawData) { return 0; } struct file_system_type fs_type = { .get_sb = LinuxDriverGetSb }; #else #error "This test intentionally fails on 2.6.19 or newer kernels." #endif vmhgfs-only/shared/autoconf/file_operations_flush.c 0000444 0000000 0000000 00000002664 13432725347 021660 0 ustar root root /********************************************************* * Copyright (C) 2013-2014 VMware, Inc. 
All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Linux v2.6.18 added an owner parameter to flush. * But SLES10 has backported the change to its 2.6.16.60 kernel, * so we can't rely solely on kernel version to determine number of * arguments. * * This test will fail on a kernel with such a patch. */ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) #error This compile test intentionally fails on 2.6.18 and newer kernels. #else #include <linux/fs.h> static int TestFlush(struct file *file); { return 0; } struct file_operations testFO = { .flush = TestFlush, }; #endif vmhgfs-only/shared/autoconf/netif_num_params.c 0000444 0000000 0000000 00000003352 13432725347 020617 0 ustar root root /********************************************************* * Copyright (C) 2009 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Detect whether netif_rx_complete (and netif_rx_schedule) take a single * napi_struct argument. The foundation was laid whith introducing Generic * Receive Offload infrastructure but dropping unneeded net_device argument * did not happen till few commits later so we can't simply test for presence * of NETIF_F_GRO. * * Test succeeds if netif_rx_complete takes dev & napi arguments, or if it * takes dev argument only (kernels before 2.6.24). Test fails if netif_rx_complete * takes only single napi argument. */ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) # error This compile test intentionally fails. #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) #include <linux/netdevice.h> #ifdef NETIF_F_GRO void test_netif_rx_complete(struct net_device *dev, struct napi_struct *napi) { netif_rx_complete(dev, napi); } #endif #endif vmhgfs-only/shared/autoconf/cachector.c 0000444 0000000 0000000 00000003270 13432725347 017222 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" /* * Between 2.6.23 and 2.6.24-rc1 ctor prototype was changed from * ctor(ptr, cache, flags) to ctor(cache, ptr). Unfortunately there * is no typedef for ctor, so we have to redefine kmem_cache_create * to find out ctor prototype. This assumes that kmem_cache_create * takes 5 arguments and not 6 - that change occured between * 2.6.22 and 2.6.23-rc1. If prototype matches, then this is old * kernel. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) #error "This test intentionally fails on 2.6.24 and newer kernels." #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) #include <linux/slab.h> struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(void *, struct kmem_cache *, unsigned long)); #endif vmhgfs-only/shared/autoconf/geninclude.c 0000444 0000000 0000000 00000002321 13432725347 017400 0 ustar root root /********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #ifdef CONFIG_X86_VOYAGER APATH/mach-voyager #endif #ifdef CONFIG_X86_VISWS APATH/mach-visws #endif #ifdef CONFIG_X86_NUMAQ APATH/mach-numaq #endif #ifdef CONFIG_X86_BIGSMP APATH/mach-bigsmp #endif #ifdef CONFIG_X86_SUMMIT APATH/mach-summit #endif #ifdef CONFIG_X86_GENERICARCH APATH/mach-generic #endif APATH/mach-default vmhgfs-only/shared/autoconf/cachector1.c 0000444 0000000 0000000 00000003075 13432725347 017306 0 ustar root root /********************************************************* * Copyright (C) 2008 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" /* * Between 2.6.27-rc1 and 2.6.27-rc2 ctor prototype was changed from * ctor(cache, ptr) to ctor(ptr). Unfortunately there * is no typedef for ctor, so we have to redefine kmem_cache_create * to find out ctor prototype. If prototype matches, then this is old * kernel. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28) #error "This test intentionally fails on 2.6.28 and newer kernels." #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 26) #include <linux/slab.h> struct kmem_cache *kmem_cache_create(const char *, size_t, size_t, unsigned long, void (*)(struct kmem_cache *, void *)); #endif vmhgfs-only/shared/autoconf/wait_on_bit.c 0000444 0000000 0000000 00000003306 13432725347 017565 0 ustar root root /********************************************************* * Copyright (C) 2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) #include <linux/fs.h> #include <linux/wait.h> #include <linux/sched.h> unsigned long test_bits; /* * After 3.17.0, wait_on_bit changed its interface to remove the action * callback argument and this was backported to some Linux kernel versions * such as 3.10 for the RHEL 7.3 version. * * This test will fail on a kernel with such a patch. */ int test(void) { return wait_on_bit(&test_bits, 0, NULL, TASK_UNINTERRUPTIBLE); } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 17, 0) #error "This test intentionally fails on 3.17.0 and newer kernels." 
#else /* * It must be older than 2.6.13 in which case we don't use the function. */ #endif vmhgfs-only/shared/autoconf/cachecreate.c 0000444 0000000 0000000 00000003210 13432725347 017510 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" /* * All kernels before 2.6.22 take 6 arguments. All kernels since * 2.6.23-rc1 take 5 arguments. Only kernels between 2.6.22 and * 2.6.23-rc1 are questionable - we could ignore them if we wanted, * nobody cares about them even now. But unfortunately RedHat is * re-releasing 2.6.X-rc kernels under 2.6.(X-1) name, so they * are releasing 2.6.23-rc1 as 2.6.22-5055-something, so we have * to do autodetection for them. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) /* Success... */ #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) #error "This test intentionally fails on 2.6.23 and newer kernels." 
#else #include <linux/slab.h> struct kmem_cache *kmemtest(void) { return kmem_cache_create("test", 12, 0, 0, NULL, NULL); } #endif vmhgfs-only/shared/autoconf/skblin.c 0000444 0000000 0000000 00000002572 13432725347 016555 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Detect whether skb_linearize takes one or two arguments. */ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 6, 17) /* * Since 2.6.18 all kernels have single-argument skb_linearize. For * older kernels use autodetection. Not using autodetection on newer * kernels saves us from compile failure on some post 2.6.18 kernels * which do not have selfcontained skbuff.h. */ #include <linux/skbuff.h> int test_skb_linearize(struct sk_buff *skb) { return skb_linearize(skb); } #endif vmhgfs-only/shared/autoconf/dalias.c 0000444 0000000 0000000 00000003217 13432725347 016525 0 ustar root root /********************************************************* * Copyright (C) 2015-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) #include <linux/dcache.h> #include <linux/list.h> /* * After 3.19.0, the dentry d_alias field was moved. Fedora * backported this behavior into earlier kernel versions. * The type of the d_alias field changed from 3.6 onwards * which was a list head to being a list node. The check * for earlier than 3.6 is done separately. * * This test will fail on a kernel with such a patch. */ void test(void) { struct dentry aliasDentry; INIT_HLIST_NODE(&aliasDentry.d_alias); } #else /* Intentionally passes for earlier than 3.6.0 kernels as a separate test is done. */ #endif #else #error "This test intentionally fails on 3.19.0 or newer kernels." #endif vmhgfs-only/shared/autoconf/truncate_pagecache.c 0000444 0000000 0000000 00000003315 13432725347 021074 0 ustar root root /********************************************************* * Copyright (C) 2015-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) #include <linux/fs.h> #include <linux/mm.h> #include <linux/types.h> /* loff_t */ /* * After 3.12.0, truncate_pagecache changed its interface to just use * the new file size only. Red Hat backported this behavior into a 3.10.0 * kernel. * * This test will fail on a kernel with such a patch. */ void test(void) { struct inode inode; loff_t oldSize = 0; loff_t newSize = 4096; truncate_pagecache(&inode, oldSize, newSize); } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) #error "This test intentionally fails on 3.12.0 and newer kernels." #else /* * It must be older than 2.6.32 in which case we assume success. * So not 3.12 compatible. There is no function for these versions. */ #endif vmhgfs-only/shared/autoconf/netcreate_num_params.c 0000444 0000000 0000000 00000003160 13432725347 021461 0 ustar root root /********************************************************* * Copyright (C) 2010 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * During 2.6.33 merge window net_proto_ops->create() method was changed - * a new 'kern' field, signalling whether socket is being created by kernel * or userspace application, was added to it. Unfortunately, some * distributions, such as RHEL 6, have backported the change to earlier * kernels, so we can't rely solely on kernel version to determine number of * arguments. */ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) # error This compile test intentionally fails. #else #include <linux/net.h> static int TestCreate(struct net *net, struct socket *sock, int protocol, int kern) { return 0; } struct net_proto_family testFamily = { .create = TestCreate, }; #endif vmhgfs-only/shared/autoconf/filldir1.c 0000444 0000000 0000000 00000003260 13432725347 016774 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) #include <linux/fs.h> #include <linux/types.h> /* loff_t */ #include <linux/stddef.h> /* NULL */ /* * After 2.6.18, filldir and statfs were changed to send 64-bit inode * numbers to user space. Red Hat backported this behavior into a 2.6.17 * kernel. * * This test will fail on a kernel with such a patch. */ static int LinuxDriverFilldir(void *buf, const char *name, int namelen, loff_t offset, ino_t ino, unsigned int d_type) { return 0; } void test(void) { vfs_readdir(NULL, LinuxDriverFilldir, NULL); } #else #error "This test intentionally fails on 2.6.20 and newer kernels." #endif vmhgfs-only/shared/autoconf/statfs1.c 0000444 0000000 0000000 00000002671 13432725347 016660 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) #include <linux/fs.h> /* * Around 2.6.18, the super_block pointer in statfs was changed to a dentry * pointer. Red Hat backported this behavior into a 2.6.17 kernel. * * This test will fail on a kernel with such a patch. */ static int LinuxDriverStatFs(struct super_block *sb, struct kstatfs *stat) { return 0; } struct super_operations super_ops = { .statfs = LinuxDriverStatFs }; #else #error "This test intentionally fails on 2.6.19 and newer kernels." #endif vmhgfs-only/shared/autoconf/dcount.c 0000444 0000000 0000000 00000002603 13432725347 016562 0 ustar root root /********************************************************* * Copyright (C) 2014 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0) #include <linux/dcache.h> /* * After 3.11.0, the dentry d_count field was removed. 
Red Hat * backported this behavior into a 3.10.0 kernel. * * This test will fail on a kernel with such a patch. */ void test(void) { struct dentry dentry; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) dentry.d_count = 1; #else atomic_set(&dentry.d_count, 1); #endif } #else #error "This test intentionally fails on 3.11.0 or newer kernels." #endif vmhgfs-only/shared/autoconf/dalias1.c 0000444 0000000 0000000 00000003325 13432725347 016606 0 ustar root root /********************************************************* * Copyright (C) 2015-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0) && \ LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0) #include <linux/dcache.h> #include <linux/list.h> /* * After 3.19.0, the dentry d_alias field was moved. Fedora * backported this behavior into earlier kernels. * The type of the d_alias field changed from 3.6 onwards * which was a list head to being a list node. The check * for 3.6 onwards is done separately. * * This test will fail on a kernel with such a patch. */ void test(void) { struct dentry aliasDentry; INIT_LIST_HEAD(&aliasDentry.d_alias); } #else /* * Intentionally passes for earlier than 3.2.0 kernels as d_alias is valid. 
* * Intentionally passes for 3.6.0 or later kernels as d_alias is a different type. * A separate test with the different type is run for those kernel versions. */ #endif vmhgfs-only/shared/autoconf/inode1.c 0000444 0000000 0000000 00000002703 13432725347 016446 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #include "compat_version.h" #include "compat_autoconf.h" #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) #include <linux/fs.h> #include <linux/stddef.h> /* NULL */ /* * After 2.6.18, inodes were "slimmed". This involved removing the union * that encapsulates inode private data (and using i_private instead), as well * as removing i_blksize. Red Hat backported this behavior into a 2.6.17 * kernel. * * This test will fail on a kernel with such a patch. */ void test(void) { struct inode inode; inode.u.generic_ip = NULL; } #else #error "This test intentionally fails on 2.6.20 and newer kernels." #endif vmhgfs-only/shared/compat_pagemap.h 0000444 0000000 0000000 00000002535 13432725347 016436 0 ustar root root /********************************************************* * Copyright (C) 2009 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PAGEMAP_H__ # define __COMPAT_PAGEMAP_H__ #include <linux/pagemap.h> /* * AOP_FLAG_NOFS was defined in the same changeset that * grab_cache_page_write_begin() was introduced. */ #ifdef AOP_FLAG_NOFS #define compat_grab_cache_page_write_begin(mapping, index, flags) \ grab_cache_page_write_begin((mapping), (index), (flags)) #else #define compat_grab_cache_page_write_begin(mapping, index, flags) \ __grab_cache_page((mapping), (index)); #endif #endif /* __COMPAT_PAGEMAP_H__ */ vmhgfs-only/shared/compat_mutex.h 0000444 0000000 0000000 00000003475 13432725347 016172 0 ustar root root /********************************************************* * Copyright (C) 2009 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_MUTEX_H__ # define __COMPAT_MUTEX_H__ /* Blocking mutexes were introduced in 2.6.16. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) #include "compat_semaphore.h" typedef struct semaphore compat_mutex_t; # define compat_define_mutex(_mx) DECLARE_MUTEX(_mx) # define compat_mutex_init(_mx) init_MUTEX(_mx) # define compat_mutex_lock(_mx) down(_mx) # define compat_mutex_lock_interruptible(_mx) down_interruptible(_mx) # define compat_mutex_unlock(_mx) up(_mx) #else #include <linux/mutex.h> typedef struct mutex compat_mutex_t; # define compat_define_mutex(_mx) DEFINE_MUTEX(_mx) # define compat_mutex_init(_mx) mutex_init(_mx) # define compat_mutex_lock(_mx) mutex_lock(_mx) # define compat_mutex_lock_interruptible(_mx) mutex_lock_interruptible(_mx) # define compat_mutex_unlock(_mx) mutex_unlock(_mx) #endif #endif /* __COMPAT_MUTEX_H__ */ vmhgfs-only/shared/compat_string.h 0000444 0000000 0000000 00000003563 13432725347 016334 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_STRING_H__ # define __COMPAT_STRING_H__ #include <linux/string.h> /* * kstrdup was born in 2.6.13. This implementation is almost identical to the * one found there. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) #define compat_kstrdup(s, gfp) kstrdup(s, gfp) #else #define compat_kstrdup(s, gfp) \ ({ \ size_t len; \ char *buf; \ len = strlen(s) + 1; \ buf = kmalloc(len, gfp); \ memcpy(buf, s, len); \ buf; \ }) #endif #endif /* __COMPAT_STRING_H__ */ vmhgfs-only/shared/vmciKernelAPI2.h 0000444 0000000 0000000 00000004240 13432725350 016161 0 ustar root root /********************************************************* * Copyright (C) 2010 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciKernelAPI2.h -- * * Kernel API (v2) exported from the VMCI host and guest drivers. 
*/ #ifndef __VMCI_KERNELAPI_2_H__ #define __VMCI_KERNELAPI_2_H__ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmciKernelAPI1.h" #if defined __cplusplus extern "C" { #endif /* Define version 2. */ #undef VMCI_KERNEL_API_VERSION #define VMCI_KERNEL_API_VERSION_2 2 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_2 /* VMCI Doorbell API. */ #define VMCI_FLAG_DELAYED_CB 0x01 typedef void (*VMCICallback)(void *clientData); int vmci_doorbell_create(VMCIHandle *handle, uint32 flags, VMCIPrivilegeFlags privFlags, VMCICallback notifyCB, void *clientData); int vmci_doorbell_destroy(VMCIHandle handle); int vmci_doorbell_notify(VMCIHandle handle, VMCIPrivilegeFlags privFlags); /* Typedefs for all of the above, used by the IOCTLs and the kernel library. */ typedef int (VMCIDoorbell_CreateFct)(VMCIHandle *, uint32, VMCIPrivilegeFlags, VMCICallback, void *); typedef int (VMCIDoorbell_DestroyFct)(VMCIHandle); typedef int (VMCIDoorbell_NotifyFct)(VMCIHandle, VMCIPrivilegeFlags); #if defined __cplusplus } // extern "C" #endif #endif /* !__VMCI_KERNELAPI_2_H__ */ vmhgfs-only/shared/vmci_kernel_if.h 0000444 0000000 0000000 00000042051 13432725350 016424 0 ustar root root /********************************************************* * Copyright (C) 2006-2016,2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmci_kernel_if.h -- * * This file defines helper functions for VMCI host _and_ guest * kernel code. It must work for Windows, Mac OS, vmkernel, Linux and * Solaris kernels, i.e. using defines where necessary. */ #ifndef _VMCI_KERNEL_IF_H_ #define _VMCI_KERNEL_IF_H_ #if !defined(__linux__) && !defined(_WIN32) && !defined(__APPLE__) && \ !defined(VMKERNEL) # error "Platform not supported." #endif #if defined(_WIN32) # include <ntddk.h> #else #define UNREFERENCED_PARAMETER(P) #endif #if defined(__linux__) && !defined(VMKERNEL) # include "driver-config.h" # include "compat_cred.h" # include "compat_module.h" # include "compat_semaphore.h" # include "compat_spinlock.h" # include "compat_version.h" # include <linux/wait.h> #endif // linux #ifdef __APPLE__ # include <IOKit/IOLib.h> # include <mach/task.h> # include <mach/semaphore.h> # include <sys/kauth.h> #endif #ifdef VMKERNEL # include "splock.h" # include "splock_customRanks.h" # include "semaphore_ext.h" # include "vmkapi.h" # include "world_dist.h" #endif #include "vm_basic_types.h" #include "vmci_defs.h" #if defined(VMKERNEL) # include "list.h" #else # include "dbllnklst.h" #endif #if defined __cplusplus extern "C" { #endif /* Flags for specifying memory type. */ #define VMCI_MEMORY_NORMAL 0x0 #define VMCI_MEMORY_ATOMIC 0x1 #define VMCI_MEMORY_NONPAGED 0x2 /* Platform specific type definitions. 
*/ #if defined(VMKERNEL) # define VMCI_EXPORT_SYMBOL(_SYMBOL) VMK_MODULE_EXPORT_SYMBOL(_SYMBOL); #elif defined(__linux__) # define VMCI_EXPORT_SYMBOL(_symbol) EXPORT_SYMBOL(_symbol); #elif defined(__APPLE__) # define VMCI_EXPORT_SYMBOL(_symbol) __attribute__((visibility("default"))) #else # define VMCI_EXPORT_SYMBOL(_symbol) #endif #if defined(VMKERNEL) typedef MCSLock VMCILock; typedef SP_IRQL VMCILockFlags; typedef Semaphore VMCIEvent; typedef Semaphore VMCIMutex; typedef World_ID VMCIHostVmID; typedef uint32 VMCIHostUser; typedef PPN64 *VMCIQPGuestMem; #elif defined(__linux__) typedef spinlock_t VMCILock; typedef unsigned long VMCILockFlags; typedef wait_queue_head_t VMCIEvent; typedef struct semaphore VMCIMutex; typedef PPN *VMCIPpnList; /* List of PPNs in produce/consume queue. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0) typedef kuid_t VMCIHostUser; #else typedef uid_t VMCIHostUser; #endif typedef VA64 VMCIQPGuestMem; #elif defined(__APPLE__) typedef IOLock *VMCILock; typedef unsigned long VMCILockFlags; typedef struct { IOLock *lock; DblLnkLst_Links waiters; int buffered; } VMCIEvent; typedef IOLock *VMCIMutex; typedef void *VMCIPpnList; /* Actually a pointer to the C++ Object IOMemoryDescriptor */ typedef uid_t VMCIHostUser; typedef VA64 *VMCIQPGuestMem; #elif defined(_WIN32) typedef KSPIN_LOCK VMCILock; typedef KIRQL VMCILockFlags; typedef KEVENT VMCIEvent; typedef FAST_MUTEX VMCIMutex; typedef PMDL VMCIPpnList; /* MDL to map the produce/consume queue. */ typedef PSID VMCIHostUser; typedef VA64 *VMCIQPGuestMem; #endif // VMKERNEL /* Callback needed for correctly waiting on events. 
*/ typedef int (*VMCIEventReleaseCB)(void *clientData); /* * Internal locking dependencies within VMCI: * * CONTEXTFIRE < CONTEXT, CONTEXTLIST, EVENT, HASHTABLE * * DOORBELL < HASHTABLE * * QPHIBERNATE < EVENT */ #ifdef VMKERNEL typedef Lock_Rank VMCILockRank; typedef SemaRank VMCISemaRank; #define VMCI_SEMA_RANK_QPHEADER (SEMA_RANK_FS - 1) #define VMCI_LOCK_RANK_MAX_NONBLOCK (MIN(SP_RANK_WAIT, \ SP_RANK_HEAPLOCK_DYNAMIC) - 1) #define VMCI_LOCK_RANK_MAX (SP_RANK_BLOCKABLE_HIGHEST_MAJOR - 2) /* * Determines whether VMCI locks will be blockable or not. If blockable, * all locks will be at or below VMCI_LOCK_RANK_MAX. If not, locks will * instead use VMCI_LOCK_RANK_MAX_NONBLOCK as the maximum. The other * VMCI_LOCK_RANK_XXX values will be rebased to be non-blocking as well * in that case. */ extern Bool vmciBlockableLock; #else typedef unsigned long VMCILockRank; typedef unsigned long VMCISemaRank; #define VMCI_LOCK_RANK_MAX 0x0fff #define VMCI_SEMA_RANK_QPHEADER 0x0fff #endif // VMKERNEL #define VMCI_LOCK_RANK_CONTEXT VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_CONTEXTLIST VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_DATAGRAMVMK VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_EVENT VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_HASHTABLE VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_RESOURCE VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_QPHEADER VMCI_LOCK_RANK_MAX #define VMCI_LOCK_RANK_DOORBELL (VMCI_LOCK_RANK_HASHTABLE - 1) #define VMCI_LOCK_RANK_CONTEXTFIRE (MIN(VMCI_LOCK_RANK_CONTEXT, \ MIN(VMCI_LOCK_RANK_CONTEXTLIST, \ MIN(VMCI_LOCK_RANK_EVENT, \ VMCI_LOCK_RANK_HASHTABLE))) - 1) #define VMCI_LOCK_RANK_QPHIBERNATE (VMCI_LOCK_RANK_EVENT - 1) #define VMCI_LOCK_RANK_PACKET_QP (VMCI_LOCK_RANK_QPHEADER - 1) //#define VMCI_LOCK_RANK_PACKET_QP 0xffd /* For vVol */ #define VMCI_SEMA_RANK_QUEUEPAIRLIST (VMCI_SEMA_RANK_QPHEADER - 1) #define VMCI_SEMA_RANK_GUESTMEM (VMCI_SEMA_RANK_QUEUEPAIRLIST - 1) /* * Host specific struct used for signalling. 
*/ typedef struct VMCIHost { #if defined(VMKERNEL) World_ID vmmWorldID[2]; /* * First one is the active one and the second * one is shadow world during FSR. */ #elif defined(__linux__) wait_queue_head_t waitQueue; #elif defined(__APPLE__) struct Socket *socket; /* vmci Socket object on Mac OS. */ #elif defined(_WIN32) KEVENT *callEvent; /* Ptr to userlevel event used when signalling * new pending guestcalls in kernel. */ #endif } VMCIHost; /* * Guest device port I/O. */ #if defined(__linux__) typedef unsigned short int VMCIIoPort; typedef int VMCIIoHandle; #elif defined(_WIN32) typedef PUCHAR VMCIIoPort; typedef int VMCIIoHandle; #elif defined(__APPLE__) typedef unsigned short int VMCIIoPort; typedef void *VMCIIoHandle; #endif // __APPLE__ void VMCI_ReadPortBytes(VMCIIoHandle handle, VMCIIoPort port, uint8 *buffer, size_t bufferLength); int VMCI_InitLock(VMCILock *lock, char *name, VMCILockRank rank); void VMCI_CleanupLock(VMCILock *lock); void VMCI_GrabLock(VMCILock *lock, VMCILockFlags *flags); void VMCI_ReleaseLock(VMCILock *lock, VMCILockFlags flags); void VMCI_GrabLock_BH(VMCILock *lock, VMCILockFlags *flags); void VMCI_ReleaseLock_BH(VMCILock *lock, VMCILockFlags flags); void VMCIHost_InitContext(VMCIHost *hostContext, uintptr_t eventHnd); void VMCIHost_ReleaseContext(VMCIHost *hostContext); void VMCIHost_SignalCall(VMCIHost *hostContext); void VMCIHost_ClearCall(VMCIHost *hostContext); Bool VMCIHost_WaitForCallLocked(VMCIHost *hostContext, VMCILock *lock, VMCILockFlags *flags, Bool useBH); #ifdef VMKERNEL int VMCIHost_ContextToHostVmID(VMCIHost *hostContext, VMCIHostVmID *hostVmID); int VMCIHost_ContextHasUuid(VMCIHost *hostContext, const char *uuid); void VMCIHost_SetActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd); Bool VMCIHost_RemoveHnd(VMCIHost *hostContext, uintptr_t eventHnd); Bool VMCIHost_IsActiveHnd(VMCIHost *hostContext, uintptr_t eventHnd); void VMCIHost_SetInactiveHnd(VMCIHost *hostContext, uintptr_t eventHnd); uint32 
VMCIHost_NumHnds(VMCIHost *hostContext); uintptr_t VMCIHost_GetActiveHnd(VMCIHost *hostContext); void VMCIHost_SignalBitmap(VMCIHost *hostContext); void VMCIHost_SignalBitmapAlways(VMCIHost *hostContext); void VMCIHost_SignalCallAlways(VMCIHost *hostContext); #endif #if defined(_WIN32) /* * On Windows, Driver Verifier will panic() if we leak memory when we are * unloaded. It dumps the leaked blocks for us along with callsites, which * it handily tracks, but if we embed ExAllocate() inside a function, then * the callsite is useless. So make this a macro on this platform only. */ # define VMCI_AllocKernelMem(_sz, _f) \ ExAllocatePoolWithTag((((_f) & VMCI_MEMORY_NONPAGED) ? \ NonPagedPool : PagedPool), \ (_sz), 'MMTC') #else // _WIN32 void *VMCI_AllocKernelMem(size_t size, int flags); #endif // _WIN32 void VMCI_FreeKernelMem(void *ptr, size_t size); int VMCI_CopyToUser(VA64 dst, const void *src, size_t len); Bool VMCIWellKnownID_AllowMap(VMCIId wellKnownID, VMCIPrivilegeFlags privFlags); int VMCIHost_CompareUser(VMCIHostUser *user1, VMCIHostUser *user2); void VMCI_CreateEvent(VMCIEvent *event); void VMCI_DestroyEvent(VMCIEvent *event); void VMCI_SignalEvent(VMCIEvent *event); void VMCI_WaitOnEvent(VMCIEvent *event, VMCIEventReleaseCB releaseCB, void *clientData); #if (defined(__APPLE__) || defined(__linux__) || defined(_WIN32)) && !defined(VMKERNEL) Bool VMCI_WaitOnEventInterruptible(VMCIEvent *event, VMCIEventReleaseCB releaseCB, void *clientData); #endif #if !defined(VMKERNEL) && (defined(__linux__) || defined(_WIN32) || \ defined(__APPLE__)) int VMCI_CopyFromUser(void *dst, VA64 src, size_t len); #endif typedef void (VMCIWorkFn)(void *data); Bool VMCI_CanScheduleDelayedWork(void); int VMCI_ScheduleDelayedWork(VMCIWorkFn *workFn, void *data); int VMCIMutex_Init(VMCIMutex *mutex, char *name, VMCILockRank rank); void VMCIMutex_Destroy(VMCIMutex *mutex); void VMCIMutex_Acquire(VMCIMutex *mutex); void VMCIMutex_Release(VMCIMutex *mutex); #if defined(_WIN32) || 
defined(__APPLE__) int VMCIKernelIf_Init(void); void VMCIKernelIf_Exit(void); #if defined(_WIN32) void VMCIKernelIf_DrainDelayedWork(void); #endif // _WIN32 #endif // _WIN32 || __APPLE__ #if !defined(VMKERNEL) && \ (defined(__linux__) || defined(_WIN32) || defined(__APPLE__)) void *VMCI_AllocQueue(uint64 size, uint32 flags); void VMCI_FreeQueue(void *q, uint64 size); typedef struct PPNSet { uint64 numProducePages; uint64 numConsumePages; VMCIPpnList producePPNs; VMCIPpnList consumePPNs; Bool initialized; } PPNSet; int VMCI_AllocPPNSet(void *produceQ, uint64 numProducePages, void *consumeQ, uint64 numConsumePages, PPNSet *ppnSet); void VMCI_FreePPNSet(PPNSet *ppnSet); int VMCI_PopulatePPNList(uint8 *callBuf, const PPNSet *ppnSet); #endif struct VMCIQueue; struct PageStoreAttachInfo; struct VMCIQueue *VMCIHost_AllocQueue(uint64 queueSize); void VMCIHost_FreeQueue(struct VMCIQueue *queue, uint64 queueSize); #if defined(VMKERNEL) typedef World_Handle *VMCIGuestMemID; #define INVALID_VMCI_GUEST_MEM_ID NULL #else typedef uint32 VMCIGuestMemID; #define INVALID_VMCI_GUEST_MEM_ID 0 #endif #if defined(VMKERNEL) || defined(__linux__) || defined(_WIN32) || \ defined(__APPLE__) struct QueuePairPageStore; int VMCIHost_RegisterUserMemory(struct QueuePairPageStore *pageStore, struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); void VMCIHost_UnregisterUserMemory(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); int VMCIHost_MapQueues(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ, uint32 flags); int VMCIHost_UnmapQueues(VMCIGuestMemID gid, struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); void VMCI_InitQueueMutex(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); void VMCI_CleanupQueueMutex(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); int VMCI_AcquireQueueMutex(struct VMCIQueue *queue, Bool canBlock); void VMCI_ReleaseQueueMutex(struct VMCIQueue *queue); #else // Below are the guest OS'es without host side support. 
# define VMCI_InitQueueMutex(_pq, _cq) # define VMCI_CleanupQueueMutex(_pq, _cq) do { } while (0) # define VMCI_AcquireQueueMutex(_q, _cb) VMCI_SUCCESS # define VMCI_ReleaseQueueMutex(_q) do { } while (0) # define VMCIHost_RegisterUserMemory(_ps, _pq, _cq) VMCI_ERROR_UNAVAILABLE # define VMCIHost_UnregisterUserMemory(_pq, _cq) do { } while (0) # define VMCIHost_MapQueues(_pq, _cq, _f) VMCI_SUCCESS # define VMCIHost_UnmapQueues(_gid, _pq, _cq) VMCI_SUCCESS #endif #if defined(VMKERNEL) void VMCIHost_MarkQueuesAvailable(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); void VMCIHost_MarkQueuesUnavailable(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); int VMCIHost_RevalidateQueues(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); #else # define VMCIHost_MarkQueuesAvailable(_q, _p) do { } while (0) # define VMCIHost_MarkQueuesUnavailable(_q, _p) do { } while(0) #endif #if defined(VMKERNEL) || defined(__linux__) void VMCI_LockQueueHeader(struct VMCIQueue *queue); void VMCI_UnlockQueueHeader(struct VMCIQueue *queue); #else # define VMCI_LockQueueHeader(_q) NOT_IMPLEMENTED() # define VMCI_UnlockQueueHeader(_q) NOT_IMPLEMENTED() #endif #if defined(VMKERNEL) int VMCI_QueueHeaderUpdated(struct VMCIQueue *produceQ); #else # define VMCI_QueueHeaderUpdated(_q) VMCI_SUCCESS #endif #if (!defined(VMKERNEL) && defined(__linux__)) || defined(_WIN32) || \ defined(__APPLE__) int VMCIHost_GetUserMemory(VA64 produceUVA, VA64 consumeUVA, struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); void VMCIHost_ReleaseUserMemory(struct VMCIQueue *produceQ, struct VMCIQueue *consumeQ); #else # define VMCIHost_GetUserMemory(_puva, _cuva, _pq, _cq) VMCI_ERROR_UNAVAILABLE # define VMCIHost_ReleaseUserMemory(_pq, _cq) NOT_IMPLEMENTED() #endif #if defined(_WIN32) Bool VMCI_EnqueueToDevNull(struct VMCIQueue *queue); int VMCI_ConvertToLocalQueue(struct VMCIQueue *queueInfo, struct VMCIQueue *otherQueueInfo, uint64 size, Bool keepContent, void **oldQueue); void 
VMCI_RevertToNonLocalQueue(struct VMCIQueue *queueInfo, void *nonLocalQueue, uint64 size); void VMCI_FreeQueueBuffer(void *queue, uint64 size); Bool VMCI_CanCreate(void); #else // _WIN32 # define VMCI_EnqueueToDevNull(_q) FALSE # define VMCI_ConvertToLocalQueue(_pq, _cq, _s, _oq, _kc) VMCI_ERROR_UNAVAILABLE # define VMCI_RevertToNonLocalQueue(_q, _nlq, _s) # define VMCI_FreeQueueBuffer(_q, _s) # define VMCI_CanCreate() TRUE #endif // !_WIN32 Bool VMCI_GuestPersonalityActive(void); Bool VMCI_HostPersonalityActive(void); #if defined(VMKERNEL) typedef List_Links VMCIListItem; typedef List_Links VMCIList; # define VMCIList_Init(_l) List_Init(_l) # define VMCIList_InitEntry(_e) List_InitElement(_e) # define VMCIList_Empty(_l) List_IsEmpty(_l) # define VMCIList_Insert(_e, _l) List_Insert(_e, LIST_ATREAR(_l)) # define VMCIList_Remove(_e) List_Remove(_e) # define VMCIList_Scan(_cur, _l) LIST_FORALL(_l, _cur) # define VMCIList_ScanSafe(_cur, _next, _l) LIST_FORALL_SAFE(_l, _cur, _next) # define VMCIList_Entry(_elem, _type, _field) List_Entry(_elem, _type, _field) # define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:List_First(_l)) #else typedef DblLnkLst_Links VMCIListItem; typedef DblLnkLst_Links VMCIList; # define VMCIList_Init(_l) DblLnkLst_Init(_l) # define VMCIList_InitEntry(_e) DblLnkLst_Init(_e) # define VMCIList_Empty(_l) (!DblLnkLst_IsLinked(_l)) # define VMCIList_Insert(_e, _l) DblLnkLst_LinkLast(_l, _e) # define VMCIList_Remove(_e) DblLnkLst_Unlink1(_e) # define VMCIList_Scan(_cur, _l) DblLnkLst_ForEach(_cur, _l) # define VMCIList_ScanSafe(_cur, _next, _l) DblLnkLst_ForEachSafe(_cur, _next, _l) # define VMCIList_Entry(_elem, _type, _field) DblLnkLst_Container(_elem, _type, _field) # define VMCIList_First(_l) (VMCIList_Empty(_l)?NULL:(_l)->next) #endif #if defined __cplusplus } // extern "C" #endif #endif // _VMCI_KERNEL_IF_H_ vmhgfs-only/shared/vmware_pack_init.h 0000444 0000000 0000000 00000003651 13432725350 016775 0 ustar root root 
/********************************************************* * Copyright (C) 2002-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __VMWARE_PACK_INIT_H__ # define __VMWARE_PACK_INIT_H__ /* * vmware_pack_init.h -- * * Platform-independent code to make the compiler pack (i.e. have them * occupy the smallest possible space) structure definitions. The following * constructs are known to work --hpreg * * #include "vmware_pack_begin.h" * struct foo { * ... * } * #include "vmware_pack_end.h" * ; * * typedef * #include "vmware_pack_begin.h" * struct foo { * ... * } * #include "vmware_pack_end.h" * foo; */ #ifdef _MSC_VER /* * MSVC 6.0 emits warning 4103 when the pack push and pop pragma pairing is * not balanced within 1 included file. That is annoying because our scheme * is based on the pairing being balanced between 2 included files. * * So we disable this warning, but this is safe because the compiler will also * emit warning 4161 when there is more pops than pushes within 1 main * file --hpreg */ # pragma warning(disable:4103) #elif __GNUC__ #else # error Compiler packing... 
#endif #endif /* __VMWARE_PACK_INIT_H__ */ vmhgfs-only/shared/vm_basic_defs.h 0000444 0000000 0000000 00000044517 13432725350 016245 0 ustar root root /********************************************************* * Copyright (C) 2003-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_defs.h -- * * Standard macros for VMware source code. */ #ifndef _VM_BASIC_DEFS_H_ #define _VM_BASIC_DEFS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_types.h" // For INLINE. /* Checks for FreeBSD, filtering out VMKERNEL. */ #if !defined(VMKERNEL) && defined(__FreeBSD__) #define __IS_FREEBSD__ 1 #else #define __IS_FREEBSD__ 0 #endif #define __IS_FREEBSD_VER__(ver) (__IS_FREEBSD__ && __FreeBSD_version >= (ver)) #if defined _WIN32 && defined USERLEVEL #include <stddef.h> /* * We redefine offsetof macro from stddef; make * sure that it's already defined before we do that. */ #include <windows.h> // for Sleep() and LOWORD() etc. #undef GetFreeSpace // Unpollute preprocessor namespace. 
#endif /* * Simple macros */ #ifndef vmw_offsetof #define vmw_offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER) #endif #if (defined __APPLE__ || defined __FreeBSD__) && \ (!defined KERNEL && !defined _KERNEL && !defined VMKERNEL && !defined __KERNEL__) # include <stddef.h> #else #ifndef offsetof #define VMW_DEFINED_OFFSETOF /* * XXX While the _WIN32 implementation appears to be identical to vmw_offsetof * in terms of behavior, they need to be separate to match verbatim the * definition used by the respective compilers, to avoid a redefinition warning. * * This is necessary until we eliminate the inclusion of <windows.h> above. */ #ifdef _WIN32 #define offsetof(s,m) (size_t)&(((s *)0)->m) /* * We use the builtin offset for gcc/clang, except when we're running under the * vmkernel's GDB macro preprocessor, since gdb doesn't understand * __builtin_offsetof. */ #elif defined __GNUC__ && !defined VMKERNEL_GDB_MACRO_BUILDER #define offsetof __builtin_offsetof #else #define offsetof vmw_offsetof #endif #endif // offsetof #endif // __APPLE__ #define VMW_CONTAINER_OF(ptr, type, member) \ ((type *)((char *)(ptr) - vmw_offsetof(type, member))) #ifndef ARRAYSIZE #define ARRAYSIZE(a) (sizeof (a) / sizeof *(a)) #endif #ifndef MIN #define MIN(_a, _b) (((_a) < (_b)) ? (_a) : (_b)) #endif /* The Solaris 9 cross-compiler complains about these not being used */ #ifndef sun static INLINE int Min(int a, int b) { return a < b ? a : b; } #endif #ifndef MAX #define MAX(_a, _b) (((_a) > (_b)) ? (_a) : (_b)) #endif #ifndef sun static INLINE int Max(int a, int b) { return a > b ? a : b; } #endif #define VMW_CLAMP(x, min, max) \ ((x) < (min) ? (min) : ((x) > (max) ? 
(max) : (x))) #define ROUNDUP(x,y) (((x) + (y) - 1) / (y) * (y)) #define ROUNDDOWN(x,y) ((x) / (y) * (y)) #define ROUNDUPBITS(x, bits) (((uintptr_t) (x) + MASK(bits)) & ~MASK(bits)) #define ROUNDDOWNBITS(x, bits) ((uintptr_t) (x) & ~MASK(bits)) #define CEILING(x, y) (((x) + (y) - 1) / (y)) #if defined __APPLE__ #include <machine/param.h> #undef MASK #endif /* * The MASK macro behaves badly when given negative numbers or numbers larger * than the highest order bit number (e.g. 32 on a 32-bit machine) as an * argument. The range 0..31 is safe. */ #define MASK(n) ((1 << (n)) - 1) /* make an n-bit mask */ #define MASK64(n) ((CONST64U(1) << (n)) - 1) /* make an n-bit mask */ /* * MASKRANGE64 makes a bit vector starting at bit lo and ending at bit hi. No * checking for lo < hi is done. */ #define MASKRANGE64(hi, lo) (MASK64((hi) - (lo) + 1) << (lo)) /* SIGNEXT64 sign extends a n-bit value to 64-bits. */ #define SIGNEXT64(val, n) (((int64)(val) << (64 - (n))) >> (64 - (n))) #define DWORD_ALIGN(x) ((((x) + 3) >> 2) << 2) #define QWORD_ALIGN(x) ((((x) + 7) >> 3) << 3) #define IMPLIES(a,b) (!(a) || (b)) /* * Not everybody (e.g., the monitor) has NULL */ #ifndef NULL #ifdef __cplusplus #define NULL 0 #else #define NULL ((void *)0) #endif #endif /* * Token concatenation * * The C preprocessor doesn't prescan arguments when they are * concatenated or stringified. So we need extra levels of * indirection to convince the preprocessor to expand its * arguments. */ #define CONC(x, y) x##y #define XCONC(x, y) CONC(x, y) #define XXCONC(x, y) XCONC(x, y) #define MAKESTR(x) #x #define XSTR(x) MAKESTR(x) /* * Wide versions of string constants. */ #ifndef WSTR #define WSTR_(X) L ## X #define WSTR(X) WSTR_(X) #endif /* * Page operations * * It has been suggested that these definitions belong elsewhere * (like x86types.h). However, I deem them common enough * (since even regular user-level programs may want to do * page-based memory manipulation) to be here. 
* -- edward */ #ifndef PAGE_SHIFT // { #if defined VM_I386 #define PAGE_SHIFT 12 #elif defined __APPLE__ #define PAGE_SHIFT 12 #elif defined VM_ARM_64 #define PAGE_SHIFT 12 #elif defined __arm__ #define PAGE_SHIFT 12 #else #error #endif #endif // } #ifndef PAGE_SIZE #define PAGE_SIZE (1 << PAGE_SHIFT) #endif #ifndef PAGE_MASK #define PAGE_MASK (PAGE_SIZE - 1) #endif #ifndef PAGE_OFFSET #define PAGE_OFFSET(_addr) ((uintptr_t)(_addr) & (PAGE_SIZE - 1)) #endif #ifndef PAGE_NUMBER #define PAGE_NUMBER(_addr) ((uintptr_t)(_addr) / PAGE_SIZE) #endif #ifndef VM_PAGE_BASE #define VM_PAGE_BASE(_addr) ((_addr) & ~(PAGE_SIZE - 1)) #endif #ifndef VM_PAGES_SPANNED #define VM_PAGES_SPANNED(_addr, _size) \ ((((_addr) & (PAGE_SIZE - 1)) + (_size) + (PAGE_SIZE - 1)) >> PAGE_SHIFT) #endif #ifndef BYTES_2_PAGES #define BYTES_2_PAGES(_nbytes) ((_nbytes) >> PAGE_SHIFT) #endif #ifndef PAGES_2_BYTES #define PAGES_2_BYTES(_npages) (((uint64)(_npages)) << PAGE_SHIFT) #endif #ifndef MBYTES_SHIFT #define MBYTES_SHIFT 20 #endif #ifndef MBYTES_2_PAGES #define MBYTES_2_PAGES(_nbytes) \ ((uint64)(_nbytes) << (MBYTES_SHIFT - PAGE_SHIFT)) #endif #ifndef PAGES_2_MBYTES #define PAGES_2_MBYTES(_npages) ((_npages) >> (MBYTES_SHIFT - PAGE_SHIFT)) #endif #ifndef ROUNDUP_PAGES_2_MBYTES #define ROUNDUP_PAGES_2_MBYTES(_npages) \ (((_npages) + MASK(MBYTES_SHIFT - PAGE_SHIFT)) >> (MBYTES_SHIFT - PAGE_SHIFT)) #endif #ifndef ROUNDDOWN_PAGES_2_MBYTES #define ROUNDDOWN_PAGES_2_MBYTES(_npages) \ ((_npages) >> (MBYTES_SHIFT - PAGE_SHIFT)) #endif #ifndef GBYTES_2_PAGES #define GBYTES_2_PAGES(_nbytes) ((uint64)(_nbytes) << (30 - PAGE_SHIFT)) #endif #ifndef PAGES_2_GBYTES #define PAGES_2_GBYTES(_npages) ((_npages) >> (30 - PAGE_SHIFT)) #endif #ifndef BYTES_2_MBYTES #define BYTES_2_MBYTES(_nbytes) ((_nbytes) >> MBYTES_SHIFT) #endif #ifndef MBYTES_2_BYTES #define MBYTES_2_BYTES(_nbytes) ((uint64)(_nbytes) << MBYTES_SHIFT) #endif #ifndef BYTES_2_GBYTES #define BYTES_2_GBYTES(_nbytes) ((_nbytes) >> 30) #endif #ifndef 
GBYTES_2_BYTES #define GBYTES_2_BYTES(_nbytes) ((uint64)(_nbytes) << 30) #endif #ifndef VM_PAE_LARGE_PAGE_SHIFT #define VM_PAE_LARGE_PAGE_SHIFT 21 #endif #ifndef VM_PAE_LARGE_PAGE_SIZE #define VM_PAE_LARGE_PAGE_SIZE (1 << VM_PAE_LARGE_PAGE_SHIFT) #endif #ifndef VM_PAE_LARGE_PAGE_MASK #define VM_PAE_LARGE_PAGE_MASK (VM_PAE_LARGE_PAGE_SIZE - 1) #endif #ifndef VM_PAE_LARGE_2_SMALL_PAGES #define VM_PAE_LARGE_2_SMALL_PAGES (BYTES_2_PAGES(VM_PAE_LARGE_PAGE_SIZE)) #endif #ifndef VM_PAE_LARGE_2_BYTES #define VM_PAE_LARGE_2_BYTES(_2mbytes) ((_2mbytes) << VM_PAE_LARGE_PAGE_SHIFT) #endif #ifndef VM_1GB_PAGE_SHIFT #define VM_1GB_PAGE_SHIFT 30 #endif #ifndef VM_1GB_PAGE_SIZE #define VM_1GB_PAGE_SIZE (1 << VM_1GB_PAGE_SHIFT) #endif #ifndef VM_1GB_2_PAGES #define VM_1GB_2_PAGES (BYTES_2_PAGES(VM_1GB_PAGE_SIZE)) #endif #ifndef VM_1GB_2_PDIRS #define VM_1GB_2_PDIRS (VM_1GB_PAGE_SIZE / VM_PAE_LARGE_PAGE_SIZE) #endif /* * Word operations */ #ifndef LOWORD #define LOWORD(_dw) ((_dw) & 0xffff) #endif #ifndef HIWORD #define HIWORD(_dw) (((_dw) >> 16) & 0xffff) #endif #ifndef LOBYTE #define LOBYTE(_w) ((_w) & 0xff) #endif #ifndef HIBYTE #define HIBYTE(_w) (((_w) >> 8) & 0xff) #endif #ifndef HIDWORD #define HIDWORD(_qw) ((uint32)((_qw) >> 32)) #endif #ifndef LODWORD #define LODWORD(_qw) ((uint32)(_qw)) #endif #define QWORD(_hi, _lo) ((((uint64)(_hi)) << 32) | ((uint32)(_lo))) /* * Deposit a field _src at _pos bits from the right, * with a length of _len, into the integer _target. */ #define DEPOSIT_BITS(_src,_pos,_len,_target) { \ unsigned mask = ((1 << _len) - 1); \ unsigned shiftedmask = ((1 << _len) - 1) << _pos; \ _target = (_target & ~shiftedmask) | ((_src & mask) << _pos); \ } /* * Get return address. 
*/ #ifdef _MSC_VER #ifdef __cplusplus extern "C" #endif void *_ReturnAddress(void); #pragma intrinsic(_ReturnAddress) #define GetReturnAddress() _ReturnAddress() #elif __GNUC__ #define GetReturnAddress() __builtin_return_address(0) #endif #ifdef __GNUC__ #ifndef sun /* * A bug in __builtin_frame_address was discovered in gcc 4.1.1, and * fixed in 4.2.0; assume it originated in 4.0. PR 147638 and 554369. */ #if !(__GNUC__ == 4 && (__GNUC_MINOR__ == 0 || __GNUC_MINOR__ == 1)) #define GetFrameAddr() __builtin_frame_address(0) #endif #endif // sun #endif // __GNUC__ /* * Data prefetch was added in gcc 3.1.1 * http://www.gnu.org/software/gcc/gcc-3.1/changes.html */ #ifdef __GNUC__ # if ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ > 1) || \ (__GNUC__ == 3 && __GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 1)) # define PREFETCH_R(var) __builtin_prefetch((var), 0 /* read */, \ 3 /* high temporal locality */) # define PREFETCH_W(var) __builtin_prefetch((var), 1 /* write */, \ 3 /* high temporal locality */) # else # define PREFETCH_R(var) ((void)(var)) # define PREFETCH_W(var) ((void)(var)) # endif #endif /* __GNUC__ */ #ifdef USERLEVEL // { /* * Standardize some Posix names on Windows. */ #ifdef _WIN32 // { /* Conflict with definition of Visual Studio 2015 */ #if (_MSC_VER < 1900) #define snprintf _snprintf #endif #define strtok_r strtok_s #if (_MSC_VER < 1500) #define vsnprintf _vsnprintf #endif typedef int uid_t; typedef int gid_t; static INLINE void sleep(unsigned int sec) { Sleep(sec * 1000); } static INLINE int usleep(unsigned long usec) { Sleep(CEILING(usec, 1000)); return 0; } typedef int pid_t; #define F_OK 0 #define X_OK 1 #define W_OK 2 #define R_OK 4 #endif // } /* * Macro for username comparison. */ #ifdef _WIN32 // { #define USERCMP(x,y) Str_Strcasecmp(x,y) #else #define USERCMP(x,y) strcmp(x,y) #endif // } #endif // } #ifndef va_copy #ifdef _WIN32 /* * Windows needs va_copy. 
This works for both 32 and 64-bit Windows * based on inspection of how varags.h from the Visual C CRTL is * implemented. (Future versions of the RTL may break this). */ #define va_copy(dest, src) ((dest) = (src)) #elif defined(__APPLE__) && defined(KERNEL) // The macOS kernel SDK defines va_copy in stdarg.h. #include <stdarg.h> #elif defined(__GNUC__) && (__GNUC__ < 3) /* * Old versions of gcc recognize __va_copy, but not va_copy. */ #define va_copy(dest, src) __va_copy(dest, src) #endif // _WIN32 #endif // va_copy /* * This one is outside USERLEVEL because it's used by * files compiled into the Windows hgfs driver or the display * driver. */ #if defined(_WIN32) && defined(_MSC_VER) #define PATH_MAX 256 #ifndef strcasecmp #define strcasecmp(_s1,_s2) _stricmp((_s1),(_s2)) #endif #ifndef strncasecmp #define strncasecmp(_s1,_s2,_n) _strnicmp((_s1),(_s2),(_n)) #endif #endif /* * Convenience definitions of unicode characters. */ #ifndef UTF8_ELLIPSIS #define UTF8_ELLIPSIS "\xe2\x80\xa6" #endif /* * Convenience macros and definitions. Can often be used instead of #ifdef. 
*/ #undef ARM64_ONLY #ifdef VM_ARM_64 #define ARM64_ONLY(x) x #else #define ARM64_ONLY(x) #endif #undef X86_ONLY #ifdef VM_X86_ANY #define X86_ONLY(x) x #else #define X86_ONLY(x) #endif #undef DEBUG_ONLY #ifdef VMX86_DEBUG #define vmx86_debug 1 #define DEBUG_ONLY(x) x #else #define vmx86_debug 0 #define DEBUG_ONLY(x) #endif #ifdef VMX86_STATS #define vmx86_stats 1 #define STATS_ONLY(x) x #else #define vmx86_stats 0 #define STATS_ONLY(x) #endif #ifdef VMX86_DEVEL #define vmx86_devel 1 #define DEVEL_ONLY(x) x #else #define vmx86_devel 0 #define DEVEL_ONLY(x) #endif #ifdef VMX86_LOG #define vmx86_log 1 #define LOG_ONLY(x) x #else #define vmx86_log 0 #define LOG_ONLY(x) #endif #ifdef VMX86_BETA #define vmx86_beta 1 #define BETA_ONLY(x) x #else #define vmx86_beta 0 #define BETA_ONLY(x) #endif #ifdef VMX86_RELEASE #define vmx86_release 1 #define RELEASE_ONLY(x) x #else #define vmx86_release 0 #define RELEASE_ONLY(x) #endif #ifdef VMX86_SERVER #define vmx86_server 1 #define SERVER_ONLY(x) x #define HOSTED_ONLY(x) #else #define vmx86_server 0 #define SERVER_ONLY(x) #define HOSTED_ONLY(x) x #endif #ifdef VMKERNEL #define vmkernel 1 #define VMKERNEL_ONLY(x) x #else #define vmkernel 0 #define VMKERNEL_ONLY(x) #endif #ifdef _WIN32 #define WIN32_ONLY(x) x #define POSIX_ONLY(x) #define vmx86_win32 1 #else #define WIN32_ONLY(x) #define POSIX_ONLY(x) x #define vmx86_win32 0 #endif #ifdef __linux__ #define vmx86_linux 1 #define LINUX_ONLY(x) x #else #define vmx86_linux 0 #define LINUX_ONLY(x) #endif #ifdef __APPLE__ #define vmx86_apple 1 #define APPLE_ONLY(x) x #else #define vmx86_apple 0 #define APPLE_ONLY(x) #endif #if defined(__APPLE__) && defined(VMW_APPLE_SANDBOX) #define vmw_apple_sandbox 1 #else #define vmw_apple_sandbox 0 #endif #if defined(__APPLE__) && defined(VMW_APPLE_APP_STORE) #define vmw_apple_app_store 1 #else #define vmw_apple_app_store 0 #endif #ifdef VMM #define VMM_ONLY(x) x #else #define VMM_ONLY(x) #endif #if defined(VMM) || defined(VMKERNEL) #define 
USER_ONLY(x) #else #define USER_ONLY(x) x #endif #ifdef _WIN32 #define VMW_INVALID_HANDLE INVALID_HANDLE_VALUE #else #define VMW_INVALID_HANDLE (-1LL) #endif #ifdef _WIN32 #define fsync(fd) _commit(fd) #define fileno(f) _fileno(f) #else #endif /* * Debug output macros for Windows drivers (the Eng variant is for * display/printer drivers only. */ #ifdef _WIN32 #ifndef USES_OLD_WINDDK #if defined(VMX86_LOG) #ifdef _WIN64 #define WinDrvPrint(arg, ...) DbgPrintEx(DPFLTR_IHVDRIVER_ID, (ULONG)~0, arg, __VA_ARGS__) #else #define WinDrvPrint(arg, ...) DbgPrint(arg, __VA_ARGS__) #endif #define WinDrvEngPrint(arg, ...) EngDbgPrint(arg, __VA_ARGS__) #else #define WinDrvPrint(arg, ...) #define WinDrvEngPrint(arg, ...) #endif #endif #endif // _WIN32 #ifdef HOSTED_LG_PG #define hosted_lg_pg 1 #else #define hosted_lg_pg 0 #endif /* * Use to initialize cbSize for this structure to preserve < Vista * compatibility. */ #define NONCLIENTMETRICSINFO_V1_SIZE CCSIZEOF_STRUCT(NONCLIENTMETRICS, \ lfMessageFont) /* This is not intended to be thread-safe. */ #define DO_ONCE(code) \ do { \ static Bool _doOnceDone = FALSE; \ if (UNLIKELY(!_doOnceDone)) { \ _doOnceDone = TRUE; \ code; \ } \ } while (0) /* * Bug 827422 and 838523. */ #if defined __GNUC__ && __GNUC__ >= 4 #define VISIBILITY_HIDDEN __attribute__((visibility("hidden"))) #else #define VISIBILITY_HIDDEN /* nothing */ #endif /* * Bitfield extraction. */ #define EXTRACT_BITSLICE32(_val , _lsb, _msb) \ (((uint32)(_val) << (31 - (_msb))) >> ((31 - (_msb)) + (_lsb))) #define EXTRACT_BITFIELD32(_val, _pos, _len) \ EXTRACT_BITSLICE32((_val), (_pos), ((_pos) + (_len) - 1)) #define EXTRACT_BITSLICE64(_val, _lsb, _msb) \ (((uint64)(_val) << (63 - (_msb))) >> ((63 - (_msb)) + (_lsb))) #define EXTRACT_BITFIELD64(_val, _pos, _len) \ EXTRACT_BITSLICE64((_val), (_pos), ((_pos) + (_len) - 1)) /* * Typical cache line size. Use this for aligning structures to cache * lines for performance, but do not rely on it for correctness. 
* * On x86, all current processors newer than P4 have 64-byte lines, * but P4 had 128. * * On ARM, the line size can vary between cores. 64-byte lines are * common, but either larger or smaller powers of two are possible. */ #define CACHELINE_SIZE 64 #define CACHELINE_SHIFT 6 #define CACHELINE_ALIGNMENT_MASK (CACHELINE_SIZE - 1) /* * Bits to bytes sizes. */ #define SIZE_8BIT 1 #define SIZE_16BIT 2 #define SIZE_24BIT 3 #define SIZE_32BIT 4 #define SIZE_48BIT 6 #define SIZE_64BIT 8 #define SIZE_80BIT 10 #define SIZE_128BIT 16 #define SIZE_256BIT 32 #define SIZE_512BIT 64 /* * Allocate a variable of type _type, aligned to _align bytes, returning a * pointer to the variable in _var. Potentially _align - 1 bytes may be * wasted. On x86, GCC 6.3.0 behaves sub-optimally when variables are declared * on the stack using the aligned attribute, so this pattern is preferred. * See PRs 1795155, 1819963. */ #define WITH_PTR_TO_ALIGNED_VAR(_type, _align, _var) \ do { \ uint8 _buf_##_var[sizeof(_type) + (_align) - 1]; \ _type *_var = (_type *) ((uintptr_t)(_buf_##_var + (_align) - 1) & \ ~((uintptr_t) ((_align) - 1))); #define END_PTR_TO_ALIGNED_VAR \ } while (0) #endif // ifndef _VM_BASIC_DEFS_H_ vmhgfs-only/shared/backdoor_def.h 0000444 0000000 0000000 00000033730 13432725350 016056 0 ustar root root /********************************************************* * Copyright (C) 1998-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoor_def.h -- * * This contains backdoor defines that can be included from * an assembly language file. */ #ifndef _BACKDOOR_DEF_H_ #define _BACKDOOR_DEF_H_ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #if defined __cplusplus extern "C" { #endif /* * If you want to add a new low-level backdoor call for a guest userland * application, please consider using the GuestRpc mechanism instead. --hpreg */ #define BDOOR_MAGIC 0x564D5868 /* Low-bandwidth backdoor port. --hpreg */ #define BDOOR_PORT 0x5658 #define BDOOR_CMD_GETMHZ 1 /* * BDOOR_CMD_APMFUNCTION is used by: * * o The FrobOS code, which instead should either program the virtual chipset * (like the new BIOS code does, matthias offered to implement that), or not * use any VM-specific code (which requires that we correctly implement * "power off on CLI HLT" for SMP VMs, boris offered to implement that) * * o The old BIOS code, which will soon be jettisoned * * --hpreg */ #define BDOOR_CMD_APMFUNCTION 2 /* CPL0 only. */ #define BDOOR_CMD_GETDISKGEO 3 #define BDOOR_CMD_GETPTRLOCATION 4 #define BDOOR_CMD_SETPTRLOCATION 5 #define BDOOR_CMD_GETSELLENGTH 6 #define BDOOR_CMD_GETNEXTPIECE 7 #define BDOOR_CMD_SETSELLENGTH 8 #define BDOOR_CMD_SETNEXTPIECE 9 #define BDOOR_CMD_GETVERSION 10 #define BDOOR_CMD_GETDEVICELISTELEMENT 11 #define BDOOR_CMD_TOGGLEDEVICE 12 #define BDOOR_CMD_GETGUIOPTIONS 13 #define BDOOR_CMD_SETGUIOPTIONS 14 #define BDOOR_CMD_GETSCREENSIZE 15 #define BDOOR_CMD_MONITOR_CONTROL 16 /* Disabled by default. */ #define BDOOR_CMD_GETHWVERSION 17 #define BDOOR_CMD_OSNOTFOUND 18 /* CPL0 only. 
*/ #define BDOOR_CMD_GETUUID 19 #define BDOOR_CMD_GETMEMSIZE 20 //#define BDOOR_CMD_HOSTCOPY 21 /* Not in use. Was devel only. */ //#define BDOOR_CMD_SERVICE_VM 22 /* Not in use. Never shipped. */ #define BDOOR_CMD_GETTIME 23 /* Deprecated -> GETTIMEFULL. */ #define BDOOR_CMD_STOPCATCHUP 24 #define BDOOR_CMD_PUTCHR 25 /* Disabled by default. */ #define BDOOR_CMD_ENABLE_MSG 26 /* Devel only. */ //#define BDOOR_CMD_GOTO_TCL 27 /* Not in use. Was devel only */ #define BDOOR_CMD_INITPCIOPROM 28 /* CPL 0 only. */ //#define BDOOR_CMD_INT13 29 /* Not in use. */ #define BDOOR_CMD_MESSAGE 30 #define BDOOR_CMD_SIDT 31 #define BDOOR_CMD_SGDT 32 #define BDOOR_CMD_SLDT_STR 33 #define BDOOR_CMD_ISACPIDISABLED 34 //#define BDOOR_CMD_TOE 35 /* Not in use. */ #define BDOOR_CMD_ISMOUSEABSOLUTE 36 #define BDOOR_CMD_PATCH_SMBIOS_STRUCTS 37 /* CPL 0 only. */ #define BDOOR_CMD_MAPMEM 38 /* Devel only */ #define BDOOR_CMD_ABSPOINTER_DATA 39 #define BDOOR_CMD_ABSPOINTER_STATUS 40 #define BDOOR_CMD_ABSPOINTER_COMMAND 41 //#define BDOOR_CMD_TIMER_SPONGE 42 /* Not in use. */ #define BDOOR_CMD_PATCH_ACPI_TABLES 43 /* CPL 0 only. */ //#define BDOOR_CMD_DEVEL_FAKEHARDWARE 44 /* Not in use. */ #define BDOOR_CMD_GETHZ 45 #define BDOOR_CMD_GETTIMEFULL 46 //#define BDOOR_CMD_STATELOGGER 47 /* Not in use. */ #define BDOOR_CMD_CHECKFORCEBIOSSETUP 48 /* CPL 0 only. */ #define BDOOR_CMD_LAZYTIMEREMULATION 49 /* CPL 0 only. */ #define BDOOR_CMD_BIOSBBS 50 /* CPL 0 only. */ //#define BDOOR_CMD_VASSERT 51 /* Not in use. */ #define BDOOR_CMD_ISGOSDARWIN 52 #define BDOOR_CMD_DEBUGEVENT 53 #define BDOOR_CMD_OSNOTMACOSXSERVER 54 /* CPL 0 only. */ #define BDOOR_CMD_GETTIMEFULL_WITH_LAG 55 #define BDOOR_CMD_ACPI_HOTPLUG_DEVICE 56 /* Devel only. */ #define BDOOR_CMD_ACPI_HOTPLUG_MEMORY 57 /* Devel only. */ #define BDOOR_CMD_ACPI_HOTPLUG_CBRET 58 /* Devel only. */ //#define BDOOR_CMD_GET_HOST_VIDEO_MODES 59 /* Not in use. */ #define BDOOR_CMD_ACPI_HOTPLUG_CPU 60 /* Devel only. 
*/ //#define BDOOR_CMD_USB_HOTPLUG_MOUSE 61 /* Not in use. Never shipped. */ #define BDOOR_CMD_XPMODE 62 /* CPL 0 only. */ #define BDOOR_CMD_NESTING_CONTROL 63 #define BDOOR_CMD_FIRMWARE_INIT 64 /* CPL 0 only. */ #define BDOOR_CMD_FIRMWARE_ACPI_SERVICES 65 /* CPL 0 only. */ # define BDOOR_CMD_FAS_GET_TABLE_SIZE 0 # define BDOOR_CMD_FAS_GET_TABLE_DATA 1 # define BDOOR_CMD_FAS_GET_PLATFORM_NAME 2 # define BDOOR_CMD_FAS_GET_PCIE_OSC_MASK 3 # define BDOOR_CMD_FAS_GET_APIC_ROUTING 4 # define BDOOR_CMD_FAS_GET_TABLE_SKIP 5 # define BDOOR_CMD_FAS_GET_SLEEP_ENABLES 6 # define BDOOR_CMD_FAS_GET_HARD_RESET_ENABLE 7 # define BDOOR_CMD_FAS_GET_MOUSE_HID 8 # define BDOOR_CMD_FAS_GET_SMBIOS_VERSION 9 # define BDOOR_CMD_FAS_GET_64BIT_PCI_HOLE_SIZE 10 //#define BDOOR_CMD_FAS_GET_NVDIMM_FMT_CODE 11 /* Not in use. Never shipped. */ # define BDOOR_CMD_FAS_SRP_ENABLED 12 # define BDOOR_CMD_FAS_EXIT_BOOT_SERVICES 13 #define BDOOR_CMD_SENDPSHAREHINTS 66 /* Not in use. Deprecated. */ #define BDOOR_CMD_ENABLE_USB_MOUSE 67 #define BDOOR_CMD_GET_VCPU_INFO 68 # define BDOOR_CMD_VCPU_SLC64 0 # define BDOOR_CMD_VCPU_SYNC_VTSCS 1 # define BDOOR_CMD_VCPU_HV_REPLAY_OK 2 # define BDOOR_CMD_VCPU_LEGACY_X2APIC_OK 3 # define BDOOR_CMD_VCPU_MMIO_HONORS_PAT 4 # define BDOOR_CMD_VCPU_RESERVED 31 #define BDOOR_CMD_EFI_SERIALCON_CONFIG 69 /* CPL 0 only. */ #define BDOOR_CMD_BUG328986 70 /* CPL 0 only. */ #define BDOOR_CMD_FIRMWARE_ERROR 71 /* CPL 0 only. */ # define BDOOR_CMD_FE_INSUFFICIENT_MEM 0 # define BDOOR_CMD_FE_EXCEPTION 1 # define BDOOR_CMD_FE_SGX 2 # define BDOOR_CMD_FE_PCI_MMIO 3 #define BDOOR_CMD_VMK_INFO 72 #define BDOOR_CMD_EFI_BOOT_CONFIG 73 /* CPL 0 only. */ # define BDOOR_CMD_EBC_LEGACYBOOT_ENABLED 0 # define BDOOR_CMD_EBC_GET_ORDER 1 # define BDOOR_CMD_EBC_SHELL_ACTIVE 2 # define BDOOR_CMD_EBC_GET_NETWORK_BOOT_PROTOCOL 3 # define BDOOR_CMD_EBC_QUICKBOOT_ENABLED 4 # define BDOOR_CMD_EBC_GET_PXE_ARCH 5 #define BDOOR_CMD_GET_HW_MODEL 74 /* CPL 0 only. 
*/ #define BDOOR_CMD_GET_SVGA_CAPABILITIES 75 /* CPL 0 only. */ #define BDOOR_CMD_GET_FORCE_X2APIC 76 /* CPL 0 only */ #define BDOOR_CMD_SET_PCI_HOLE 77 /* CPL 0 only */ #define BDOOR_CMD_GET_PCI_HOLE 78 /* CPL 0 only */ #define BDOOR_CMD_GET_PCI_BAR 79 /* CPL 0 only */ #define BDOOR_CMD_SHOULD_GENERATE_SYSTEMID 80 /* CPL 0 only */ #define BDOOR_CMD_READ_DEBUG_FILE 81 /* Devel only. */ #define BDOOR_CMD_SCREENSHOT 82 /* Devel only. */ #define BDOOR_CMD_INJECT_KEY 83 /* Devel only. */ #define BDOOR_CMD_INJECT_MOUSE 84 /* Devel only. */ #define BDOOR_CMD_MKS_GUEST_STATS 85 /* CPL 0 only. */ # define BDOOR_CMD_MKSGS_RESET 0 # define BDOOR_CMD_MKSGS_ADD_PPN 1 # define BDOOR_CMD_MKSGS_REMOVE_PPN 2 #define BDOOR_CMD_ABSPOINTER_RESTRICT 86 #define BDOOR_CMD_GUEST_INTEGRITY 87 # define BDOOR_CMD_GI_GET_CAPABILITIES 0 # define BDOOR_CMD_GI_SETUP_ENTRY_POINT 1 # define BDOOR_CMD_GI_SETUP_ALERTS 2 # define BDOOR_CMD_GI_SETUP_STORE 3 # define BDOOR_CMD_GI_SETUP_EVENT_RING 4 # define BDOOR_CMD_GI_SETUP_NON_FAULT_READ 5 # define BDOOR_CMD_GI_ENTER_INTEGRITY_MODE 6 # define BDOOR_CMD_GI_EXIT_INTEGRITY_MODE 7 # define BDOOR_CMD_GI_RESET_INTEGRITY_MODE 8 # define BDOOR_CMD_GI_GET_EVENT_RING_STATE 9 # define BDOOR_CMD_GI_CONSUME_RING_EVENTS 10 # define BDOOR_CMD_GI_WATCH_MAPPINGS_START 11 # define BDOOR_CMD_GI_WATCH_MAPPINGS_STOP 12 # define BDOOR_CMD_GI_CHECK_MAPPINGS_NOW 13 # define BDOOR_CMD_GI_WATCH_PPNS_START 14 # define BDOOR_CMD_GI_WATCH_PPNS_STOP 15 # define BDOOR_CMD_GI_SEND_MSG 16 # define BDOOR_CMD_GI_TEST_READ_MOB 128 # define BDOOR_CMD_GI_TEST_ADD_EVENT 129 # define BDOOR_CMD_GI_TEST_MAPPING 130 # define BDOOR_CMD_GI_TEST_PPN 131 # define BDOOR_CMD_GI_MAX 131 #define BDOOR_CMD_MKSSTATS_SNAPSHOT 88 /* Devel only. */ # define BDOOR_CMD_MKSSTATS_START 0 # define BDOOR_CMD_MKSSTATS_STOP 1 #define BDOOR_CMD_SECUREBOOT 89 #define BDOOR_CMD_COPY_PHYSMEM 90 /* Devel only. */ #define BDOOR_CMD_STEALCLOCK 91 /* CPL 0 only. 
*/ # define BDOOR_STEALCLOCK_STATUS_DISABLED 0 # define BDOOR_STEALCLOCK_STATUS_ENABLED 1 #define BDOOR_CMD_GUEST_PAGE_HINTS 92 /* CPL 0 only */ #define BDOOR_CMD_FIRMWARE_UPDATE 93 /* CPL 0 only. */ # define BDOOR_CMD_FU_GET_HOST_VERSION 0 # define BDOOR_CMD_FU_UPDATE_FROM_HOST 1 # define BDOOR_CMD_FU_LOCK 2 #define BDOOR_CMD_FUZZER_HELPER 94 /* Devel only. */ # define BDOOR_CMD_FUZZER_INIT 0 # define BDOOR_CMD_FUZZER_NEXT 1 #define BDOOR_CMD_PUTCHR12 95 #define BDOOR_CMD_MAX 96 /* * IMPORTANT NOTE: When modifying the behavior of an existing backdoor command, * you must adhere to the semantics expected by the oldest Tools who use that * command. Specifically, do not alter the way in which the command modifies * the registers. Otherwise backwards compatibility will suffer. */ /* Nesting control operations */ #define NESTING_CONTROL_RESTRICT_BACKDOOR 0 #define NESTING_CONTROL_OPEN_BACKDOOR 1 #define NESTING_CONTROL_QUERY 2 #define NESTING_CONTROL_MAX 2 /* EFI Boot Order options, nibble-sized. */ #define EFI_BOOT_ORDER_TYPE_EFI 0x0 #define EFI_BOOT_ORDER_TYPE_LEGACY 0x1 #define EFI_BOOT_ORDER_TYPE_NONE 0xf #define BDOOR_NETWORK_BOOT_PROTOCOL_NONE 0x0 #define BDOOR_NETWORK_BOOT_PROTOCOL_IPV4 0x1 #define BDOOR_NETWORK_BOOT_PROTOCOL_IPV6 0x2 #define BDOOR_SECUREBOOT_STATUS_DISABLED 0xFFFFFFFFUL #define BDOOR_SECUREBOOT_STATUS_APPROVED 1 #define BDOOR_SECUREBOOT_STATUS_DENIED 2 /* High-bandwidth backdoor port. --hpreg */ #define BDOORHB_PORT 0x5659 #define BDOORHB_CMD_MESSAGE 0 #define BDOORHB_CMD_VASSERT 1 #define BDOORHB_CMD_MAX 2 /* * There is another backdoor which allows access to certain TSC-related * values using otherwise illegal PMC indices when the pseudo_perfctr * control flag is set. 
*/ #define BDOOR_PMC_HW_TSC 0x10000 #define BDOOR_PMC_REAL_NS 0x10001 #define BDOOR_PMC_APPARENT_NS 0x10002 #define BDOOR_PMC_PSEUDO_TSC 0x10003 #define IS_BDOOR_PMC(index) (((index) | 3) == 0x10003) #define BDOOR_CMD(ecx) ((ecx) & 0xffff) /* Sub commands for BDOOR_CMD_VMK_INFO */ #define BDOOR_CMD_VMK_INFO_ENTRY 1 /* * Current format for the guest page hints is: * * Arg0: BDOOR_MAGIC, Arg3: BDOOR_PORT * * Arg1: (rbx on x86) * * 0 64 * | PPN | * * Arg2: (rcx on x86) * * 0 16 32 64 * | Command | Type | Reserved | * * Arg4: (rsi on x86) * * 0 16 64 * | numPages | Reserved | * */ #define BDOOR_GUEST_PAGE_HINTS_NOT_SUPPORTED ((unsigned)-1) #define BDOOR_GUEST_PAGE_HINTS_MAX_PAGES (0xffff) #define BDOOR_GUEST_PAGE_HINTS_TYPE_PSHARE (0) #define BDOOR_GUEST_PAGE_HINTS_TYPE(reg) (((reg) >> 16) & 0xffff) #ifdef VMM /* *---------------------------------------------------------------------- * * Backdoor_CmdRequiresFullyValidVCPU -- * * A few backdoor commands require the full VCPU to be valid * (including GDTR, IDTR, TR and LDTR). The rest get read/write * access to GPRs and read access to Segment registers (selectors). * * Result: * True iff VECX contains a command that require the full VCPU to * be valid. 
* *---------------------------------------------------------------------- */ static INLINE Bool Backdoor_CmdRequiresFullyValidVCPU(unsigned cmd) { return cmd == BDOOR_CMD_SIDT || cmd == BDOOR_CMD_SGDT || cmd == BDOOR_CMD_SLDT_STR; } #endif #ifdef VM_ARM_64 #define BDOOR_ARM64_LB_PORT (BDOOR_PORT) #define BDOOR_ARM64_HB_PORT_IN (BDOORHB_PORT) #define BDOOR_ARM64_HB_PORT_OUT (BDOORHB_PORT +1) #define BDOOR_ARG0 REG_X0 #define BDOOR_ARG1 REG_X1 #define BDOOR_ARG2 REG_X2 #define BDOOR_ARG3 REG_X3 #define BDOOR_ARG4 REG_X4 #define BDOOR_ARG5 REG_X5 #define BDOOR_ARG6 REG_X6 #else #define BDOOR_ARG0 REG_RAX #define BDOOR_ARG1 REG_RBX #define BDOOR_ARG2 REG_RCX #define BDOOR_ARG3 REG_RDX #define BDOOR_ARG4 REG_RSI #define BDOOR_ARG5 REG_RDI #define BDOOR_ARG6 REG_RBP #endif #if defined __cplusplus } #endif #endif // _BACKDOOR_DEF_H_ vmhgfs-only/shared/compat_ethtool.h 0000444 0000000 0000000 00000003662 13432725347 016504 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _COMPAT_ETHTOOL_H #define _COMPAT_ETHTOOL_H /* * ethtool is a userspace utility for getting and setting ethernet device * settings. 
Kernel support for it was first published in 2.4.0-test11, but * only in 2.4.15 were the ethtool_value struct and the ETHTOOL_GLINK ioctl * added to ethtool.h (together, because the ETHTOOL_GLINK ioctl expects a * single value response). * * Likewise, ioctls for getting and setting TSO were published in 2.4.22. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) # include <linux/ethtool.h> # ifndef ETHTOOL_GLINK # define ETHTOOL_GLINK 0x0a typedef struct { __u32 cmd; __u32 data; } compat_ethtool_value; # else typedef struct ethtool_value compat_ethtool_value; # endif # ifndef ETHTOOL_GTSO # define ETHTOOL_GTSO 0x1E # define ETHTOOL_STSO 0x1F # endif #endif #if COMPAT_LINUX_VERSION_CHECK_LT(3, 3, 0) # define compat_ethtool_rxfh_indir_default(i, num_queues) (i % num_queues) #else # define compat_ethtool_rxfh_indir_default(i, num_queues) ethtool_rxfh_indir_default(i, num_queues) #endif #endif /* _COMPAT_ETHTOOL_H */ vmhgfs-only/shared/vm_basic_asm_x86_common.h 0000444 0000000 0000000 00000033175 13432725350 020157 0 ustar root root /********************************************************* * Copyright (C) 2013-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_asm_x86_common.h -- * * Basic assembler macros common to 32-bit and 64-bit x86 ISA. 
*/ #ifndef _VM_BASIC_ASM_X86_COMMON_H_ #define _VM_BASIC_ASM_X86_COMMON_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #ifndef VM_X86_ANY #error "Should be included only in x86 builds" #endif /* * x86-64 windows doesn't support inline asm so we have to use these * intrinsic functions defined in the compiler. Not all of these are well * documented. There is an array in the compiler dll (c1.dll) which has * an array of the names of all the intrinsics minus the leading * underscore. Searching around in the ntddk.h file can also be helpful. * * The declarations for the intrinsic functions were taken from the DDK. * Our declarations must match the ddk's otherwise the 64-bit c++ compiler * will complain about second linkage of the intrinsic functions. * We define the intrinsic using the basic types corresponding to the * Windows typedefs. This avoids having to include windows header files * to get to the windows types. */ #if defined(_MSC_VER) && !defined(BORA_NO_WIN32_INTRINS) #ifdef __cplusplus extern "C" { #endif /* * It seems x86 & x86-64 windows still implements these intrinsic * functions. The documentation for the x86-64 suggest the * __inbyte/__outbyte intrinsics even though the _in/_out work fine and * __inbyte/__outbyte aren't supported on x86. */ int _inp(unsigned short); unsigned short _inpw(unsigned short); unsigned long _inpd(unsigned short); int _outp(unsigned short, int); unsigned short _outpw(unsigned short, unsigned short); unsigned long _outpd(uint16, unsigned long); #pragma intrinsic(_inp, _inpw, _inpd, _outp, _outpw, _outpw, _outpd) /* * Prevents compiler from re-ordering reads, writes and reads&writes. * These functions do not add any instructions thus only affect * the compiler ordering. 
* * See: * `Lockless Programming Considerations for Xbox 360 and Microsoft Windows' * http://msdn.microsoft.com/en-us/library/bb310595(VS.85).aspx */ void _ReadBarrier(void); void _WriteBarrier(void); void _ReadWriteBarrier(void); #pragma intrinsic(_ReadBarrier, _WriteBarrier, _ReadWriteBarrier) void _mm_mfence(void); void _mm_lfence(void); #pragma intrinsic(_mm_mfence, _mm_lfence) long _InterlockedXor(long volatile *, long); #pragma intrinsic(_InterlockedXor) unsigned int __getcallerseflags(void); #pragma intrinsic(__getcallerseflags) #ifdef VM_X86_64 /* * intrinsic functions only supported by x86-64 windows as of 2k3sp1 */ unsigned __int64 __rdtsc(void); void __stosw(unsigned short *, unsigned short, size_t); void __stosd(unsigned long *, unsigned long, size_t); void _mm_pause(void); #pragma intrinsic(__rdtsc, __stosw, __stosd, _mm_pause) unsigned char _BitScanForward64(unsigned long *, unsigned __int64); unsigned char _BitScanReverse64(unsigned long *, unsigned __int64); #pragma intrinsic(_BitScanForward64, _BitScanReverse64) #endif /* VM_X86_64 */ unsigned char _BitScanForward(unsigned long *, unsigned long); unsigned char _BitScanReverse(unsigned long *, unsigned long); #pragma intrinsic(_BitScanForward, _BitScanReverse) unsigned char _bittest(const long *, long); unsigned char _bittestandset(long *, long); unsigned char _bittestandreset(long *, long); unsigned char _bittestandcomplement(long *, long); #pragma intrinsic(_bittest, _bittestandset, _bittestandreset, _bittestandcomplement) #ifdef VM_X86_64 unsigned char _bittestandset64(__int64 *, __int64); unsigned char _bittestandreset64(__int64 *, __int64); #pragma intrinsic(_bittestandset64, _bittestandreset64) #endif // VM_X86_64 #ifdef __cplusplus } #endif #endif // _MSC_VER #ifdef __GNUC__ /* * Checked against the Intel manual and GCC --hpreg * * volatile because reading from port can modify the state of the underlying * hardware. 
* * Note: The undocumented %z construct doesn't work (internal compiler error) * with gcc-2.95.1 */ #define __GCC_IN(s, type, name) \ static INLINE type \ name(uint16 port) \ { \ type val; \ \ __asm__ __volatile__( \ "in" #s " %w1, %0" \ : "=a" (val) \ : "Nd" (port) \ ); \ \ return val; \ } __GCC_IN(b, uint8, INB) __GCC_IN(w, uint16, INW) __GCC_IN(l, uint32, IN32) /* * Checked against the Intel manual and GCC --hpreg * * Note: The undocumented %z construct doesn't work (internal compiler error) * with gcc-2.95.1 */ #define __GCC_OUT(s, s2, port, val) do { \ __asm__( \ "out" #s " %" #s2 "1, %w0" \ : \ : "Nd" (port), "a" (val) \ ); \ } while (0) #define OUTB(port, val) __GCC_OUT(b, b, port, val) #define OUTW(port, val) __GCC_OUT(w, w, port, val) #define OUT32(port, val) __GCC_OUT(l, , port, val) #define GET_CURRENT_EIP(_eip) \ __asm__ __volatile("call 0\n\tpopl %0" : "=r" (_eip): ); static INLINE unsigned int GetCallerEFlags(void) { unsigned long flags; asm volatile("pushf; pop %0" : "=r"(flags)); return flags; } #elif defined(_MSC_VER) static INLINE uint8 INB(uint16 port) { return (uint8)_inp(port); } static INLINE void OUTB(uint16 port, uint8 value) { _outp(port, value); } static INLINE uint16 INW(uint16 port) { return _inpw(port); } static INLINE void OUTW(uint16 port, uint16 value) { _outpw(port, value); } static INLINE uint32 IN32(uint16 port) { return _inpd(port); } static INLINE void OUT32(uint16 port, uint32 value) { _outpd(port, value); } #ifndef VM_X86_64 #ifdef NEAR #undef NEAR #endif #define GET_CURRENT_EIP(_eip) do { \ __asm call NEAR PTR $+5 \ __asm pop eax \ __asm mov _eip, eax \ } while (0) #endif // VM_X86_64 static INLINE unsigned int GetCallerEFlags(void) { return __getcallerseflags(); } #endif // __GNUC__ /* Sequence recommended by Intel for the Pentium 4. 
*/ #define INTEL_MICROCODE_VERSION() ( \ __SET_MSR(MSR_BIOS_SIGN_ID, 0), \ __GET_EAX_FROM_CPUID(1), \ __GET_MSR(MSR_BIOS_SIGN_ID)) /* *----------------------------------------------------------------------------- * * RDTSC_BARRIER -- * * Implements an RDTSC fence. Instructions executed prior to the * fence will have completed before the fence and all stores to * memory are flushed from the store buffer. * * On AMD, MFENCE is sufficient. On Intel, only LFENCE is * documented to fence RDTSC, but LFENCE won't drain the store * buffer. So, use MFENCE;LFENCE, which will work on both AMD and * Intel. * * It is the callers' responsibility to check for SSE2 before * calling this function. * * Results: * None. * * Side effects: * Cause loads and stores prior to this to be globally visible, and * RDTSC will not pass. * *----------------------------------------------------------------------------- */ static INLINE void RDTSC_BARRIER(void) { #ifdef __GNUC__ __asm__ __volatile__( "mfence \n\t" "lfence \n\t" ::: "memory" ); #elif defined _MSC_VER /* Prevent compiler from moving code across mfence/lfence. */ _ReadWriteBarrier(); _mm_mfence(); _mm_lfence(); _ReadWriteBarrier(); #else #error No compiler defined for RDTSC_BARRIER #endif } /* * Memory Barriers * =============== * * Terminology * ----------- * * A compiler memory barrier prevents the compiler from re-ordering memory * accesses accross the barrier. It is not a CPU instruction, it is a compiler * directive (i.e. it does not emit any code). * * A CPU memory barrier prevents the CPU from re-ordering memory accesses * accross the barrier. It is a CPU instruction. * * A memory barrier is the union of a compiler memory barrier and a CPU memory * barrier. A compiler memory barrier is a useless construct by itself. It is * only useful when combined with a CPU memory barrier, to implement a memory * barrier. 
* * Semantics * --------- * * At the time COMPILER_*_BARRIER were created (and references to them were * added to the code), the code was only targetting x86. The intent of the code * was really to use a memory barrier, but because x86 uses a strongly ordered * memory model, the CPU would not re-order memory accesses, and the code could * get away with using just a compiler memory barrier. So COMPILER_*_BARRIER * were born and were implemented as compiler memory barriers _on x86_. But * make no mistake, _the semantics that the code expects from * COMPILER_*_BARRIER are that of a memory barrier_! * * DO NOT USE! * ----------- * * On at least one non-x86 architecture, COMPILER_*_BARRIER are * 1) Misnomers * 2) Not fine-grained enough to provide the best performance. * For the above two reasons, usage of COMPILER_*_BARRIER is now deprecated. * _Do not add new references to COMPILER_*_BARRIER._ Instead, precisely * document the intent of your code by using * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type>. * Existing references to COMPILER_*_BARRIER are being slowly but surely * converted, and when no references are left, COMPILER_*_BARRIER will be * retired. * * Thanks for pasting this whole comment into every architecture header. */ #if defined __GNUC__ # define COMPILER_READ_BARRIER() COMPILER_MEM_BARRIER() # define COMPILER_WRITE_BARRIER() COMPILER_MEM_BARRIER() # define COMPILER_MEM_BARRIER() __asm__ __volatile__("" ::: "memory") #elif defined _MSC_VER # define COMPILER_READ_BARRIER() _ReadBarrier() # define COMPILER_WRITE_BARRIER() _WriteBarrier() # define COMPILER_MEM_BARRIER() _ReadWriteBarrier() #endif /* * Memory barriers. These take the form of * * <mem_type/purpose>_<before_access_type>_BARRIER_<after_access_type> * * where: * <mem_type/purpose> is either SMP, DMA, or MMIO. * <*_access type> is either R(load), W(store) or RW(any). 
* * Above every use of these memory barriers in the code, there _must_ be a * comment to justify the use, i.e. a comment which: * * 1) Precisely identifies which memory accesses must not be re-ordered across * the memory barrier. * 2) Explains why it is important that the memory accesses not be re-ordered. * * Thanks for pasting this whole comment into every architecture header. * * On x86, we only need to care specifically about store-load reordering on * normal memory types. In other cases, only a compiler barrier is needed. * SMP_W_BARRIER_R is implemented with a locked xor operation (instead of the * mfence instruction) for performance reasons. See PR 1674199 for more * details. * * On x64, special instructions are only provided for load-load (lfence) and * store-store (sfence) ordering, and they don't apply to normal memory. */ static INLINE void SMP_W_BARRIER_R(void) { volatile long temp; #if defined __GNUC__ __asm__ __volatile__ ( "lock xorl $1, %0" : "+m" (temp) : /* no additional inputs */ : "cc", "memory"); #elif defined _MSC_VER _InterlockedXor(&temp, 1); #else #error SMP_W_BARRIER_R not defined for this compiler #endif } #define SMP_R_BARRIER_R() COMPILER_READ_BARRIER() #define SMP_R_BARRIER_W() COMPILER_MEM_BARRIER() #define SMP_R_BARRIER_RW() COMPILER_MEM_BARRIER() #define SMP_W_BARRIER_W() COMPILER_WRITE_BARRIER() #define SMP_W_BARRIER_RW() SMP_W_BARRIER_R() #define SMP_RW_BARRIER_R() SMP_W_BARRIER_R() #define SMP_RW_BARRIER_W() COMPILER_MEM_BARRIER() #define SMP_RW_BARRIER_RW() SMP_W_BARRIER_R() /* * Like the above, only for use with observers other than CPUs, * i.e. DMA masters. 
*/ #define DMA_R_BARRIER_R() SMP_R_BARRIER_R() #define DMA_R_BARRIER_W() SMP_R_BARRIER_W() #define DMA_R_BARRIER_RW() SMP_R_BARRIER_RW() #define DMA_W_BARRIER_R() SMP_W_BARRIER_R() #define DMA_W_BARRIER_W() SMP_W_BARRIER_W() #define DMA_W_BARRIER_RW() SMP_W_BARRIER_RW() #define DMA_RW_BARRIER_R() SMP_RW_BARRIER_R() #define DMA_RW_BARRIER_W() SMP_RW_BARRIER_W() #define DMA_RW_BARRIER_RW() SMP_RW_BARRIER_RW() /* * And finally a set for use with MMIO accesses. */ #define MMIO_R_BARRIER_R() SMP_R_BARRIER_R() #define MMIO_R_BARRIER_W() SMP_R_BARRIER_W() #define MMIO_R_BARRIER_RW() SMP_R_BARRIER_RW() #define MMIO_W_BARRIER_R() SMP_W_BARRIER_R() #define MMIO_W_BARRIER_W() SMP_W_BARRIER_W() #define MMIO_W_BARRIER_RW() SMP_W_BARRIER_RW() #define MMIO_RW_BARRIER_R() SMP_RW_BARRIER_R() #define MMIO_RW_BARRIER_W() SMP_RW_BARRIER_W() #define MMIO_RW_BARRIER_RW() SMP_RW_BARRIER_RW() #endif // _VM_BASIC_ASM_X86_COMMON_H_ vmhgfs-only/shared/vm_device_version.h 0000444 0000000 0000000 00000034705 13432725350 017165 0 ustar root root /********************************************************* * Copyright (C) 1998,2005-2012,2014-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef VM_DEVICE_VERSION_H #define VM_DEVICE_VERSION_H #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #ifdef _WIN32 #ifdef __MINGW32__ #include "initguid.h" #else #include "guiddef.h" #endif #endif /* LSILogic 53C1030 Parallel SCSI controller * LSILogic SAS1068 SAS controller */ #define PCI_VENDOR_ID_LSILOGIC 0x1000 #define PCI_DEVICE_ID_LSI53C1030 0x0030 #define PCI_DEVICE_ID_LSISAS1068 0x0054 /* Our own PCI IDs * VMware SVGA II (Unified VGA) * VMware SVGA (PCI Accelerator) * VMware vmxnet (Idealized NIC) * VMware vmxscsi (Abortive idealized SCSI controller) * VMware chipset (Subsystem ID for our motherboards) * VMware e1000 (Subsystem ID) * VMware vmxnet3 (Uniform Pass Through NIC) * VMware HD Audio codec * VMware HD Audio controller */ #define PCI_VENDOR_ID_VMWARE 0x15AD #define PCI_DEVICE_ID_VMWARE_SVGA2 0x0405 #define PCI_DEVICE_ID_VMWARE_SVGA 0x0710 #define PCI_DEVICE_ID_VMWARE_VGA 0x0711 #define PCI_DEVICE_ID_VMWARE_NET 0x0720 #define PCI_DEVICE_ID_VMWARE_SCSI 0x0730 #define PCI_DEVICE_ID_VMWARE_VMCI 0x0740 #define PCI_DEVICE_ID_VMWARE_CHIPSET 0x1976 #define PCI_DEVICE_ID_VMWARE_82545EM 0x0750 /* single port */ #define PCI_DEVICE_ID_VMWARE_82546EB 0x0760 /* dual port */ #define PCI_DEVICE_ID_VMWARE_EHCI 0x0770 #define PCI_DEVICE_ID_VMWARE_UHCI 0x0774 #define PCI_DEVICE_ID_VMWARE_XHCI_0096 0x0778 #define PCI_DEVICE_ID_VMWARE_XHCI_0100 0x0779 #define PCI_DEVICE_ID_VMWARE_1394 0x0780 #define PCI_DEVICE_ID_VMWARE_BRIDGE 0x0790 #define PCI_DEVICE_ID_VMWARE_ROOTPORT 0x07A0 #define PCI_DEVICE_ID_VMWARE_VMXNET3 0x07B0 #define 
PCI_DEVICE_ID_VMWARE_PVSCSI 0x07C0 #define PCI_DEVICE_ID_VMWARE_82574 0x07D0 #define PCI_DEVICE_ID_VMWARE_AHCI 0x07E0 #define PCI_DEVICE_ID_VMWARE_NVME 0x07F0 #define PCI_DEVICE_ID_VMWARE_HDAUDIO_CODEC 0x1975 #define PCI_DEVICE_ID_VMWARE_HDAUDIO_CONTROLLER 0x1977 /* * TXT vendor, device and revision ID. We are keeping vendor * as Intel since tboot code does not like anything other * than Intel in the SINIT ACM header. */ #define TXT_VENDOR_ID 0x8086 #define TXT_DEVICE_ID 0xB002 #define TXT_REVISION_ID 0x01 /* The hypervisor device might grow. Please leave room * for 7 more subfunctions. */ #define PCI_DEVICE_ID_VMWARE_HYPER 0x0800 #define PCI_DEVICE_ID_VMWARE_VMI 0x0801 #define PCI_DEVICE_VMI_CLASS 0x05 #define PCI_DEVICE_VMI_SUBCLASS 0x80 #define PCI_DEVICE_VMI_INTERFACE 0x00 #define PCI_DEVICE_VMI_REVISION 0x01 /* * Device IDs for the PCI passthru test device: * * 0x0809 is for old fashioned PCI with MSI. * 0x080A is for PCI express with MSI-X. * 0x080B is for PCI express with configurable BARs. 
*/ #define PCI_DEVICE_ID_VMWARE_PCI_TEST 0x0809 #define PCI_DEVICE_ID_VMWARE_PCIE_TEST1 0x080A #define PCI_DEVICE_ID_VMWARE_PCIE_TEST2 0x080B #define PCI_DEVICE_ID_VMWARE_VRDMA 0x0820 #define PCI_DEVICE_ID_VMWARE_VTPM 0x0830 /* * VMware Virtual Device Test Infrastructure (VDTI) devices */ #define PCI_DEVICE_ID_VMWARE_VDTI 0x7E57 /* stands for "TEST" */ /* From linux/pci_ids.h: * AMD Lance Ethernet controller * BusLogic SCSI controller * Ensoniq ES1371 sound controller */ #define PCI_VENDOR_ID_AMD 0x1022 #define PCI_DEVICE_ID_AMD_VLANCE 0x2000 #define PCI_DEVICE_ID_AMD_IOMMU 0x1577 #define PCI_VENDOR_ID_BUSLOGIC 0x104B #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER_NC 0x0140 #define PCI_DEVICE_ID_BUSLOGIC_MULTIMASTER 0x1040 #define PCI_VENDOR_ID_ENSONIQ 0x1274 #define PCI_DEVICE_ID_ENSONIQ_ES1371 0x1371 /* From linux/pci_ids.h: * Intel 82439TX (430 HX North Bridge) * Intel 82371AB (PIIX4 South Bridge) * Intel 82443BX (440 BX North Bridge and AGP Bridge) * Intel 82545EM (e1000, server adapter, single port) * Intel 82546EB (e1000, server adapter, dual port) * Intel HECI (as embedded in ich9m) * Intel XHCI (Panther Point / Intel 7 Series) */ #define PCI_VENDOR_ID_INTEL 0x8086 #define PCI_DEVICE_ID_INTEL_82439TX 0x7100 #define PCI_DEVICE_ID_INTEL_82371AB_0 0x7110 #define PCI_DEVICE_ID_INTEL_82371AB_2 0x7112 #define PCI_DEVICE_ID_INTEL_82371AB_3 0x7113 #define PCI_DEVICE_ID_INTEL_82371AB 0x7111 #define PCI_DEVICE_ID_INTEL_82443BX 0x7190 #define PCI_DEVICE_ID_INTEL_82443BX_1 0x7191 #define PCI_DEVICE_ID_INTEL_82443BX_2 0x7192 /* Used when no AGP support */ #define PCI_DEVICE_ID_INTEL_82545EM 0x100f #define PCI_DEVICE_ID_INTEL_82546EB 0x1010 #define PCI_DEVICE_ID_INTEL_82574 0x10d3 #define PCI_DEVICE_ID_INTEL_82574_APPLE 0x10f6 #define PCI_DEVICE_ID_INTEL_HECI 0x2a74 #define PCI_DEVICE_ID_INTEL_PANTHERPOINT_XHCI 0x1e31 /* * From drivers/usb/host/xhci-pci.c: * Intel XHCI (Lynx Point / Intel 8 Series) */ #define PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI 0x8c31 /* * Intel Volume 
Management Device (VMD) */ #define PCI_DEVICE_ID_INTEL_VMD_V1 0x201d /* * Intel Quickassist (QAT) devices. */ #define PCI_DEVICE_ID_INTEL_QAT_DH895XCC 0x0435 #define PCI_DEVICE_ID_INTEL_QAT_DH895XCC_VF 0x0443 #define PCI_DEVICE_ID_INTEL_QAT_C62X 0x37c8 #define PCI_DEVICE_ID_INTEL_QAT_C62X_VF 0x37c9 /* * Intel FPGAs */ #define PCI_DEVICE_ID_INTEL_FPGA_SKL_PF 0xbcc0 #define PCI_DEVICE_ID_INTEL_FPGA_SKL_VF 0xbcc1 #define E1000E_PCI_DEVICE_ID_CONFIG_STR "e1000e.pci.deviceID" #define E1000E_PCI_SUB_VENDOR_ID_CONFIG_STR "e1000e.pci.subVendorID" #define E1000E_PCI_SUB_DEVICE_ID_CONFIG_STR "e1000e.pci.subDeviceID" /* * Intel HD Audio controller and Realtek ALC885 codec. */ #define PCI_DEVICE_ID_INTEL_631XESB_632XESB 0x269a #define PCI_VENDOR_ID_REALTEK 0x10ec #define PCI_DEVICE_ID_REALTEK_ALC885 0x0885 /* * Fresco Logic xHCI (USB 3.0) Controller */ #define PCI_VENDOR_ID_FRESCO 0x1B73 #define PCI_DEVICE_ID_FRESCO_FL1000 0x1000 // Original 1-port chip #define PCI_DEVICE_ID_FRESCO_FL1009 0x1009 // New 2-port chip (Driver 3.0.98+) #define PCI_DEVICE_ID_FRESCO_FL1400 0x1400 // Unknown (4-port? Dev hardware?) 
/* * NEC/Renesas xHCI (USB 3.0) Controller */ #define PCI_VENDOR_ID_NEC 0x1033 #define PCI_DEVICE_ID_NEC_UPD720200 0x0194 #define PCI_REVISION_NEC_UPD720200 0x03 #define PCI_FIRMWARE_NEC_UPD720200 0x3015 #define SATA_ID_SERIAL_STR "00000000000000000001" /* Must be 20 Bytes */ #define SATA_ID_FIRMWARE_STR "00000001" /* Must be 8 Bytes */ #define AHCI_ATA_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SATA Hard Drive" #define AHCI_ATAPI_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SATA CDRW Drive" /************* Strings for IDE Identity Fields **************************/ #define VIDE_ID_SERIAL_STR "00000000000000000001" /* Must be 20 Bytes */ #define VIDE_ID_FIRMWARE_STR "00000001" /* Must be 8 Bytes */ /* No longer than 40 Bytes */ #define VIDE_ATA_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE Hard Drive" #define VIDE_ATAPI_MODEL_STR PRODUCT_GENERIC_NAME " Virtual IDE CDROM Drive" #define ATAPI_VENDOR_ID "NECVMWar" /* Must be 8 Bytes */ #define ATAPI_PRODUCT_ID PRODUCT_GENERIC_NAME " IDE CDROM" /* Must be 16 Bytes */ #define ATAPI_REV_LEVEL "1.00" /* Must be 4 Bytes */ #define IDE_NUM_INTERFACES 2 /* support for two interfaces */ #define IDE_DRIVES_PER_IF 2 /************* Strings for SCSI Identity Fields **************************/ #define SCSI_DISK_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI Hard Drive" #define SCSI_DISK_VENDOR_NAME COMPANY_NAME #define SCSI_DISK_REV_LEVEL "1.0" #define SCSI_CDROM_MODEL_STR PRODUCT_GENERIC_NAME " Virtual SCSI CDROM Drive" #define SCSI_CDROM_VENDOR_NAME COMPANY_NAME #define SCSI_CDROM_REV_LEVEL "1.0" /************* NVME implementation limits ********************************/ #define NVME_MAX_CONTROLLERS 4 #define NVME_MIN_NAMESPACES 1 #define NVME_MAX_NAMESPACES 15 /* We support only 15 namespaces same * as SCSI devices. 
*/ /************* SCSI implementation limits ********************************/ #define SCSI_MAX_CONTROLLERS 4 // Need more than 1 for MSCS clustering #define SCSI_MAX_DEVICES 16 // BT-958 emulates only 16 #define PVSCSI_HWV14_MAX_DEVICES 65 /* HWv14 And Later Supports 64 * + controller at ID 7 */ #define PVSCSI_MAX_DEVICES 255 // 255 (including the controller) #define PVSCSI_MAX_NUM_DISKS (PVSCSI_HWV14_MAX_DEVICES - 1) /************* SATA implementation limits ********************************/ #define SATA_MAX_CONTROLLERS 4 #define SATA_MAX_DEVICES 30 #define AHCI_MIN_PORTS 1 #define AHCI_MAX_PORTS SATA_MAX_DEVICES /* * Publicly supported maximum number of disks per VM. */ #define MAX_NUM_DISKS \ ((SATA_MAX_CONTROLLERS * SATA_MAX_DEVICES) + \ (SCSI_MAX_CONTROLLERS * SCSI_MAX_DEVICES) + \ (NVME_MAX_CONTROLLERS * NVME_MAX_NAMESPACES) + \ (IDE_NUM_INTERFACES * IDE_DRIVES_PER_IF)) /* * Maximum number of supported disks in a VM from HWV14 or later, using PVSCSI updated max * devices. The note above still holds true, but instead of publicly supporting * all devices, HWv14 simply extends the maximum support to 256 devices, * instead ~244 calculated above. * * PVSCSI_HW_MAX_DEVICES is 65 - allowing 64 disks + controller (at ID 7) * 4 * 64 = 256 devices. * */ #define MAX_NUM_DISKS_HWV14 MAX(MAX_NUM_DISKS, \ (SCSI_MAX_CONTROLLERS * PVSCSI_MAX_NUM_DISKS)) /* * VSCSI_BV_INTS is the number of uint32's needed for a bit vector * to cover all scsi devices per target. 
*/ #define VSCSI_BV_INTS CEILING(PVSCSI_MAX_DEVICES, 8 * sizeof (uint32)) #define SCSI_IDE_CHANNEL SCSI_MAX_CONTROLLERS #define SCSI_IDE_HOSTED_CHANNEL (SCSI_MAX_CONTROLLERS + 1) #define SCSI_SATA_CHANNEL_FIRST (SCSI_IDE_HOSTED_CHANNEL + 1) #define SCSI_NVME_CHANNEL_FIRST (SCSI_SATA_CHANNEL_FIRST + \ SATA_MAX_CONTROLLERS) #define SCSI_MAX_CHANNELS (SCSI_NVME_CHANNEL_FIRST + \ NVME_MAX_CONTROLLERS) /************* SCSI-NVME channel IDs *******************************/ #define NVME_ID_TO_SCSI_ID(nvmeId) \ (SCSI_NVME_CHANNEL_FIRST + (nvmeId)) #define SCSI_ID_TO_NVME_ID(scsiId) \ ((scsiId) - SCSI_NVME_CHANNEL_FIRST) /************* SCSI-SATA channel IDs********************************/ #define SATA_ID_TO_SCSI_ID(sataId) \ (SCSI_SATA_CHANNEL_FIRST + (sataId)) #define SCSI_ID_TO_SATA_ID(scsiId) \ ((scsiId) - SCSI_SATA_CHANNEL_FIRST) /************* Strings for the VESA BIOS Identity Fields *****************/ #define VBE_OEM_STRING COMPANY_NAME " SVGA" #define VBE_VENDOR_NAME COMPANY_NAME #define VBE_PRODUCT_NAME PRODUCT_GENERIC_NAME /************* PCI implementation limits ********************************/ #define PCI_MAX_BRIDGES 15 /************* Ethernet implementation limits ***************************/ #define MAX_ETHERNET_CARDS 10 /********************** Floppy limits ***********************************/ #define MAX_FLOPPY_DRIVES 2 /************* PCI Passthrough implementation limits ********************/ #define MAX_PCI_PASSTHRU_DEVICES 16 /************* Test device implementation limits ********************/ #define MAX_PCI_TEST_DEVICES 16 /************* VDTI PCI Device implementation limits ********************/ #define MAX_VDTI_PCI_DEVICES 16 /************* USB implementation limits ********************************/ #define MAX_USB_DEVICES_PER_HOST_CONTROLLER 127 /************* NVDIMM implementation limits ********************************/ #define NVDIMM_MAX_CONTROLLERS 1 #define MAX_NVDIMM 64 /************* vRDMA implementation limits 
******************************/ #define MAX_VRDMA_DEVICES 1 /************* QAT implementation limits ********************/ #define MAX_QAT_PCI_DEVICES 4 /************* Strings for Host USB Driver *******************************/ #ifdef _WIN32 /* * Globally unique ID for the VMware device interface. Define INITGUID before including * this header file to instantiate the variable. */ DEFINE_GUID(GUID_DEVICE_INTERFACE_VMWARE_USB_DEVICES, 0x2da1fe75, 0xaab3, 0x4d2c, 0xac, 0xdf, 0x39, 0x8, 0x8c, 0xad, 0xa6, 0x65); /* * Globally unique ID for the VMware device setup class. */ DEFINE_GUID(GUID_CLASS_VMWARE_USB_DEVICES, 0x3b3e62a5, 0x3556, 0x4d7e, 0xad, 0xad, 0xf5, 0xfa, 0x3a, 0x71, 0x2b, 0x56); /* * This string defines the device ID string of a VMware USB device. * The format is USB\Vid_XXXX&Pid_YYYY, where XXXX and YYYY are the * hexadecimal representations of the vendor and product ids, respectively. * * The official vendor ID for VMware, Inc. is 0x0E0F. * The product id for USB generic devices is 0x0001. */ #define USB_VMWARE_DEVICE_ID_WIDE L"USB\\Vid_0E0F&Pid_0001" #define USB_DEVICE_ID_LENGTH (sizeof(USB_VMWARE_DEVICE_ID_WIDE) / sizeof(WCHAR)) #ifdef UNICODE #define USB_PNP_SETUP_CLASS_NAME L"VMwareUSBDevices" #define USB_PNP_DRIVER_NAME L"vmusb" #else #define USB_PNP_SETUP_CLASS_NAME "VMwareUSBDevices" #define USB_PNP_DRIVER_NAME "vmusb" #endif #endif /* * Our JEDEC 2 Manufacturer ID number is 2 in bank 10. Our number is nine * bytes of continuation code (with an odd parity bit in bit 7) followed by the * number itself. * */ #define JEDEC_VENDOR_ID_VMWARE 0x289 #define JEDEC_DEVICE_ID_VMWARE_NVDIMM 0x1 #endif /* VM_DEVICE_VERSION_H */ vmhgfs-only/shared/vmci_defs.h 0000444 0000000 0000000 00000066156 13432725350 015423 0 ustar root root /********************************************************* * Copyright (C) 2005-2017 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _VMCI_DEF_H_ #define _VMCI_DEF_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMMEXT #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "vm_basic_defs.h" #include "vm_atomic.h" #include "vm_assert.h" #if defined __cplusplus extern "C" { #endif /* Register offsets. */ #define VMCI_STATUS_ADDR 0x00 #define VMCI_CONTROL_ADDR 0x04 #define VMCI_ICR_ADDR 0x08 #define VMCI_IMR_ADDR 0x0c #define VMCI_DATA_OUT_ADDR 0x10 #define VMCI_DATA_IN_ADDR 0x14 #define VMCI_CAPS_ADDR 0x18 #define VMCI_RESULT_LOW_ADDR 0x1c #define VMCI_RESULT_HIGH_ADDR 0x20 /* Max number of devices. */ #define VMCI_MAX_DEVICES 1 /* Status register bits. */ #define VMCI_STATUS_INT_ON 0x1 /* Control register bits. */ #define VMCI_CONTROL_RESET 0x1 #define VMCI_CONTROL_INT_ENABLE 0x2 #define VMCI_CONTROL_INT_DISABLE 0x4 /* Capabilities register bits. */ #define VMCI_CAPS_HYPERCALL 0x1 #define VMCI_CAPS_GUESTCALL 0x2 #define VMCI_CAPS_DATAGRAM 0x4 #define VMCI_CAPS_NOTIFICATIONS 0x8 /* Interrupt Cause register bits. 
*/ #define VMCI_ICR_DATAGRAM 0x1 #define VMCI_ICR_NOTIFICATION 0x2 /* Interrupt Mask register bits. */ #define VMCI_IMR_DATAGRAM 0x1 #define VMCI_IMR_NOTIFICATION 0x2 /* Interrupt type. */ typedef enum VMCIIntrType { VMCI_INTR_TYPE_INTX = 0, VMCI_INTR_TYPE_MSI = 1, VMCI_INTR_TYPE_MSIX = 2 } VMCIIntrType; /* * Maximum MSI/MSI-X interrupt vectors in the device. */ #define VMCI_MAX_INTRS 2 /* * Supported interrupt vectors. There is one for each ICR value above, * but here they indicate the position in the vector array/message ID. */ #define VMCI_INTR_DATAGRAM 0 #define VMCI_INTR_NOTIFICATION 1 /* * A single VMCI device has an upper limit of 128 MiB on the amount of * memory that can be used for queue pairs. */ #define VMCI_MAX_GUEST_QP_MEMORY (128 * 1024 * 1024) /* * We have a fixed set of resource IDs available in the VMX. * This allows us to have a very simple implementation since we statically * know how many will create datagram handles. If a new caller arrives and * we have run out of slots we can manually increment the maximum size of * available resource IDs. */ typedef uint32 VMCI_Resource; /* VMCI reserved hypervisor datagram resource IDs. */ #define VMCI_RESOURCES_QUERY 0 #define VMCI_GET_CONTEXT_ID 1 #define VMCI_SET_NOTIFY_BITMAP 2 #define VMCI_DOORBELL_LINK 3 #define VMCI_DOORBELL_UNLINK 4 #define VMCI_DOORBELL_NOTIFY 5 /* * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are * obsoleted by the removal of VM to VM communication. */ #define VMCI_DATAGRAM_REQUEST_MAP 6 #define VMCI_DATAGRAM_REMOVE_MAP 7 #define VMCI_EVENT_SUBSCRIBE 8 #define VMCI_EVENT_UNSUBSCRIBE 9 #define VMCI_QUEUEPAIR_ALLOC 10 #define VMCI_QUEUEPAIR_DETACH 11 /* * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1, * WS 7.0/7.1 and ESX 4.1 */ #define VMCI_HGFS_TRANSPORT 13 #define VMCI_UNITY_PBRPC_REGISTER 14 /* * This resource is used for VMCI socket control packets sent to the * hypervisor (CID 0) because RID 1 is already reserved. 
*/ #define VSOCK_PACKET_HYPERVISOR_RID 15 #define VMCI_RESOURCE_MAX 16 /* * The core VMCI device functionality only requires the resource IDs of * VMCI_QUEUEPAIR_DETACH and below. */ #define VMCI_CORE_DEVICE_RESOURCE_MAX VMCI_QUEUEPAIR_DETACH /* * VMCI reserved host datagram resource IDs. * vsock control channel has resource id 1. */ #define VMCI_DVFILTER_DATA_PATH_DATAGRAM 2 /* VMCI Ids. */ typedef uint32 VMCIId; typedef struct VMCIIdRange { int8 action; // VMCI_FA_X, for use in filters. VMCIId begin; // Beginning of range VMCIId end; // End of range } VMCIIdRange; typedef struct VMCIHandle { VMCIId context; VMCIId resource; } VMCIHandle; static INLINE VMCIHandle VMCI_MAKE_HANDLE(VMCIId cid, // IN: VMCIId rid) // IN: { VMCIHandle h; h.context = cid; h.resource = rid; return h; } /* *---------------------------------------------------------------------- * * VMCI_HANDLE_TO_UINT64 -- * * Helper for VMCI handle to uint64 conversion. * * Results: * The uint64 value. * * Side effects: * None. * *---------------------------------------------------------------------- */ static INLINE uint64 VMCI_HANDLE_TO_UINT64(VMCIHandle handle) // IN: { uint64 handle64; handle64 = handle.context; handle64 <<= 32; handle64 |= handle.resource; return handle64; } /* *---------------------------------------------------------------------- * * VMCI_UINT64_TO_HANDLE -- * * Helper for uint64 to VMCI handle conversion. * * Results: * The VMCI handle value. * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static INLINE VMCIHandle VMCI_UINT64_TO_HANDLE(uint64 handle64) // IN: { VMCIId context = (VMCIId)(handle64 >> 32); VMCIId resource = (VMCIId)handle64; return VMCI_MAKE_HANDLE(context, resource); } #define VMCI_HANDLE_TO_CONTEXT_ID(_handle) ((_handle).context) #define VMCI_HANDLE_TO_RESOURCE_ID(_handle) ((_handle).resource) #define VMCI_HANDLE_EQUAL(_h1, _h2) ((_h1).context == (_h2).context && \ (_h1).resource == (_h2).resource) #define VMCI_INVALID_ID 0xFFFFFFFF static const VMCIHandle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID, VMCI_INVALID_ID}; #define VMCI_HANDLE_INVALID(_handle) \ VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE) /* * The below defines can be used to send anonymous requests. * This also indicates that no response is expected. */ #define VMCI_ANON_SRC_CONTEXT_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_RESOURCE_ID VMCI_INVALID_ID #define VMCI_ANON_SRC_HANDLE VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID, \ VMCI_ANON_SRC_RESOURCE_ID) /* The lowest 16 context ids are reserved for internal use. */ #define VMCI_RESERVED_CID_LIMIT 16 /* * Hypervisor context id, used for calling into hypervisor * supplied services from the VM. */ #define VMCI_HYPERVISOR_CONTEXT_ID 0 /* * Well-known context id, a logical context that contains a set of * well-known services. This context ID is now obsolete. */ #define VMCI_WELL_KNOWN_CONTEXT_ID 1 /* * Context ID used by host endpoints. */ #define VMCI_HOST_CONTEXT_ID 2 #define VMCI_HOST_CONTEXT_INVALID_EVENT ((uintptr_t)~0) #define VMCI_CONTEXT_IS_VM(_cid) (VMCI_INVALID_ID != _cid && \ _cid > VMCI_HOST_CONTEXT_ID) /* * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make * handles that refer to a specific context. */ #define VMCI_CONTEXT_RESOURCE_ID 0 /* *----------------------------------------------------------------------------- * * VMCI error codes. 
* *----------------------------------------------------------------------------- */ #define VMCI_SUCCESS_QUEUEPAIR_ATTACH 5 #define VMCI_SUCCESS_QUEUEPAIR_CREATE 4 #define VMCI_SUCCESS_LAST_DETACH 3 #define VMCI_SUCCESS_ACCESS_GRANTED 2 #define VMCI_SUCCESS_ENTRY_DEAD 1 #define VMCI_SUCCESS 0LL #define VMCI_ERROR_INVALID_RESOURCE (-1) #define VMCI_ERROR_INVALID_ARGS (-2) #define VMCI_ERROR_NO_MEM (-3) #define VMCI_ERROR_DATAGRAM_FAILED (-4) #define VMCI_ERROR_MORE_DATA (-5) #define VMCI_ERROR_NO_MORE_DATAGRAMS (-6) #define VMCI_ERROR_NO_ACCESS (-7) #define VMCI_ERROR_NO_HANDLE (-8) #define VMCI_ERROR_DUPLICATE_ENTRY (-9) #define VMCI_ERROR_DST_UNREACHABLE (-10) #define VMCI_ERROR_PAYLOAD_TOO_LARGE (-11) #define VMCI_ERROR_INVALID_PRIV (-12) #define VMCI_ERROR_GENERIC (-13) #define VMCI_ERROR_PAGE_ALREADY_SHARED (-14) #define VMCI_ERROR_CANNOT_SHARE_PAGE (-15) #define VMCI_ERROR_CANNOT_UNSHARE_PAGE (-16) #define VMCI_ERROR_NO_PROCESS (-17) #define VMCI_ERROR_NO_DATAGRAM (-18) #define VMCI_ERROR_NO_RESOURCES (-19) #define VMCI_ERROR_UNAVAILABLE (-20) #define VMCI_ERROR_NOT_FOUND (-21) #define VMCI_ERROR_ALREADY_EXISTS (-22) #define VMCI_ERROR_NOT_PAGE_ALIGNED (-23) #define VMCI_ERROR_INVALID_SIZE (-24) #define VMCI_ERROR_REGION_ALREADY_SHARED (-25) #define VMCI_ERROR_TIMEOUT (-26) #define VMCI_ERROR_DATAGRAM_INCOMPLETE (-27) #define VMCI_ERROR_INCORRECT_IRQL (-28) #define VMCI_ERROR_EVENT_UNKNOWN (-29) #define VMCI_ERROR_OBSOLETE (-30) #define VMCI_ERROR_QUEUEPAIR_MISMATCH (-31) #define VMCI_ERROR_QUEUEPAIR_NOTSET (-32) #define VMCI_ERROR_QUEUEPAIR_NOTOWNER (-33) #define VMCI_ERROR_QUEUEPAIR_NOTATTACHED (-34) #define VMCI_ERROR_QUEUEPAIR_NOSPACE (-35) #define VMCI_ERROR_QUEUEPAIR_NODATA (-36) #define VMCI_ERROR_BUSMEM_INVALIDATION (-37) #define VMCI_ERROR_MODULE_NOT_LOADED (-38) #define VMCI_ERROR_DEVICE_NOT_FOUND (-39) #define VMCI_ERROR_QUEUEPAIR_NOT_READY (-40) #define VMCI_ERROR_WOULD_BLOCK (-41) /* VMCI clients should return error code withing this range */ 
#define VMCI_ERROR_CLIENT_MIN (-500) #define VMCI_ERROR_CLIENT_MAX (-550) /* Internal error codes. */ #define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT (-1000) #define VMCI_PATH_MAX 256 /* VMCI reserved events. */ typedef uint32 VMCI_Event; #define VMCI_EVENT_CTX_ID_UPDATE 0 // Only applicable to guest endpoints #define VMCI_EVENT_CTX_REMOVED 1 // Applicable to guest and host #define VMCI_EVENT_QP_RESUMED 2 // Only applicable to guest endpoints #define VMCI_EVENT_QP_PEER_ATTACH 3 // Applicable to guest, host and VMX #define VMCI_EVENT_QP_PEER_DETACH 4 // Applicable to guest, host and VMX #define VMCI_EVENT_MEM_ACCESS_ON 5 // Applicable to VMX and vmk. On vmk, // this event has the Context payload type. #define VMCI_EVENT_MEM_ACCESS_OFF 6 // Applicable to VMX and vmk. Same as // above for the payload type. #define VMCI_EVENT_GUEST_PAUSED 7 // Applicable to vmk. This event has the // Context payload type. #define VMCI_EVENT_GUEST_UNPAUSED 8 // Applicable to vmk. Same as above for // the payload type. #define VMCI_EVENT_MAX 9 /* * Of the above events, a few are reserved for use in the VMX, and * other endpoints (guest and host kernel) should not use them. For * the rest of the events, we allow both host and guest endpoints to * subscribe to them, to maintain the same API for host and guest * endpoints. */ #define VMCI_EVENT_VALID_VMX(_event) (_event == VMCI_EVENT_QP_PEER_ATTACH || \ _event == VMCI_EVENT_QP_PEER_DETACH || \ _event == VMCI_EVENT_MEM_ACCESS_ON || \ _event == VMCI_EVENT_MEM_ACCESS_OFF) #if defined(VMX86_SERVER) #define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX) #else // VMX86_SERVER #define VMCI_EVENT_VALID(_event) (_event < VMCI_EVENT_MAX && \ _event != VMCI_EVENT_MEM_ACCESS_ON && \ _event != VMCI_EVENT_MEM_ACCESS_OFF && \ _event != VMCI_EVENT_GUEST_PAUSED && \ _event != VMCI_EVENT_GUEST_UNPAUSED) #endif // VMX86_SERVER /* Reserved guest datagram resource ids. */ #define VMCI_EVENT_HANDLER 0 /* VMCI privileges. 
*/ typedef enum VMCIResourcePrivilegeType { VMCI_PRIV_CH_PRIV, VMCI_PRIV_DESTROY_RESOURCE, VMCI_PRIV_ASSIGN_CLIENT, VMCI_PRIV_DG_CREATE, VMCI_PRIV_DG_SEND, VMCI_PRIV_NOTIFY, VMCI_NUM_PRIVILEGES, } VMCIResourcePrivilegeType; /* * VMCI coarse-grained privileges (per context or host * process/endpoint. An entity with the restricted flag is only * allowed to interact with the hypervisor and trusted entities. */ typedef uint32 VMCIPrivilegeFlags; #define VMCI_PRIVILEGE_FLAG_RESTRICTED 0x01 #define VMCI_PRIVILEGE_FLAG_TRUSTED 0x02 #define VMCI_PRIVILEGE_ALL_FLAGS (VMCI_PRIVILEGE_FLAG_RESTRICTED | \ VMCI_PRIVILEGE_FLAG_TRUSTED) #define VMCI_NO_PRIVILEGE_FLAGS 0x00 #define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS VMCI_NO_PRIVILEGE_FLAGS #define VMCI_LEAST_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_RESTRICTED #define VMCI_MAX_PRIVILEGE_FLAGS VMCI_PRIVILEGE_FLAG_TRUSTED #define VMCI_PUBLIC_GROUP_NAME "vmci public group" /* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */ #define VMCI_RESERVED_RESOURCE_ID_MAX 1023 #define VMCI_DOMAIN_NAME_MAXLEN 32 #define VMCI_LGPFX "VMCI: " /* * VMCIQueueHeader * * A Queue cannot stand by itself as designed. Each Queue's header * contains a pointer into itself (the producerTail) and into its peer * (consumerHead). The reason for the separation is one of * accessibility: Each end-point can modify two things: where the next * location to enqueue is within its produceQ (producerTail); and * where the next dequeue location is in its consumeQ (consumerHead). * * An end-point cannot modify the pointers of its peer (guest to * guest; NOTE that in the host both queue headers are mapped r/w). * But, each end-point needs read access to both Queue header * structures in order to determine how much space is used (or left) * in the Queue. This is because for an end-point to know how full * its produceQ is, it needs to use the consumerHead that points into * the produceQ but -that- consumerHead is in the Queue header for * that end-points consumeQ. 
* * Thoroughly confused? Sorry. * * producerTail: the point to enqueue new entrants. When you approach * a line in a store, for example, you walk up to the tail. * * consumerHead: the point in the queue from which the next element is * dequeued. In other words, who is next in line is he who is at the * head of the line. * * Also, producerTail points to an empty byte in the Queue, whereas * consumerHead points to a valid byte of data (unless producerTail == * consumerHead in which case consumerHead does not point to a valid * byte of data). * * For a queue of buffer 'size' bytes, the tail and head pointers will be in * the range [0, size-1]. * * If produceQHeader->producerTail == consumeQHeader->consumerHead * then the produceQ is empty. */ typedef struct VMCIQueueHeader { /* All fields are 64bit and aligned. */ VMCIHandle handle; /* Identifier. */ Atomic_uint64 producerTail; /* Offset in this queue. */ Atomic_uint64 consumerHead; /* Offset in peer queue. */ } VMCIQueueHeader; /* * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair * size to be less than 4GB, and use 32bit atomic operations on the head and * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which * is an atomic read-modify-write. This will cause traces to fire when a 32bit * consumer tries to read the producer's tail pointer, for example, because the * consumer has read-only access to the producer's tail pointer. * * We provide the following macros to invoke 32bit or 64bit atomic operations * based on the architecture the code is being compiled on. */ /* Architecture independent maximum queue size. 
*/ #define QP_MAX_QUEUE_SIZE_ARCH_ANY CONST64U(0xffffffff) #ifdef __x86_64__ # define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffffffffffff) # define QPAtomic_ReadOffset(x) Atomic_Read64(x) # define QPAtomic_WriteOffset(x, y) Atomic_Write64(x, y) #else /* * Wrappers below are being used to call Atomic_Read32 because of the * 'type punned' compilation warning received when Atomic_Read32 is * called with a Atomic_uint64 pointer typecasted to Atomic_uint32 * pointer from QPAtomic_ReadOffset. Ditto with QPAtomic_WriteOffset. */ static INLINE uint32 TypeSafe_Atomic_Read32(void *var) // IN: { return Atomic_Read32((Atomic_uint32 *)(var)); } static INLINE void TypeSafe_Atomic_Write32(void *var, uint32 val) // IN: { Atomic_Write32((Atomic_uint32 *)(var), (uint32)(val)); } # define QP_MAX_QUEUE_SIZE_ARCH CONST64U(0xffffffff) # define QPAtomic_ReadOffset(x) TypeSafe_Atomic_Read32((void *)(x)) # define QPAtomic_WriteOffset(x, y) \ TypeSafe_Atomic_Write32((void *)(x), (uint32)(y)) #endif /* __x86_64__ */ /* *----------------------------------------------------------------------------- * * QPAddPointer -- * * Helper to add a given offset to a head or tail pointer. Wraps the value * of the pointer around the max size of the queue. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void QPAddPointer(Atomic_uint64 *var, // IN: size_t add, // IN: uint64 size) // IN: { uint64 newVal = QPAtomic_ReadOffset(var); if (newVal >= size - add) { newVal -= size; } newVal += add; QPAtomic_WriteOffset(var, newVal); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_ProducerTail() -- * * Helper routine to get the Producer Tail from the supplied queue. * * Results: * The contents of the queue's producer tail. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static INLINE uint64 VMCIQueueHeader_ProducerTail(const VMCIQueueHeader *qHeader) // IN: { VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader; return QPAtomic_ReadOffset(&qh->producerTail); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_ConsumerHead() -- * * Helper routine to get the Consumer Head from the supplied queue. * * Results: * The contents of the queue's consumer tail. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint64 VMCIQueueHeader_ConsumerHead(const VMCIQueueHeader *qHeader) // IN: { VMCIQueueHeader *qh = (VMCIQueueHeader *)qHeader; return QPAtomic_ReadOffset(&qh->consumerHead); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_AddProducerTail() -- * * Helper routine to increment the Producer Tail. Fundamentally, * QPAddPointer() is used to manipulate the tail itself. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void VMCIQueueHeader_AddProducerTail(VMCIQueueHeader *qHeader, // IN/OUT: size_t add, // IN: uint64 queueSize) // IN: { QPAddPointer(&qHeader->producerTail, add, queueSize); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_AddConsumerHead() -- * * Helper routine to increment the Consumer Head. Fundamentally, * QPAddPointer() is used to manipulate the head itself. * * Results: * None. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static INLINE void VMCIQueueHeader_AddConsumerHead(VMCIQueueHeader *qHeader, // IN/OUT: size_t add, // IN: uint64 queueSize) // IN: { QPAddPointer(&qHeader->consumerHead, add, queueSize); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_CheckAlignment -- * * Checks if the given queue is aligned to page boundary. Returns TRUE if * the alignment is good. * * Results: * TRUE or FALSE. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE Bool VMCIQueueHeader_CheckAlignment(const VMCIQueueHeader *qHeader) // IN: { uintptr_t hdr, offset; hdr = (uintptr_t) qHeader; offset = hdr & (PAGE_SIZE -1); return offset == 0; } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_GetPointers -- * * Helper routine for getting the head and the tail pointer for a queue. * Both the VMCIQueues are needed to get both the pointers for one queue. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void VMCIQueueHeader_GetPointers(const VMCIQueueHeader *produceQHeader, // IN: const VMCIQueueHeader *consumeQHeader, // IN: uint64 *producerTail, // OUT: uint64 *consumerHead) // OUT: { if (producerTail) { *producerTail = VMCIQueueHeader_ProducerTail(produceQHeader); } if (consumerHead) { *consumerHead = VMCIQueueHeader_ConsumerHead(consumeQHeader); } } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_ResetPointers -- * * Reset the tail pointer (of "this" queue) and the head pointer (of * "peer" queue). * * Results: * None. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static INLINE void VMCIQueueHeader_ResetPointers(VMCIQueueHeader *qHeader) // IN/OUT: { QPAtomic_WriteOffset(&qHeader->producerTail, CONST64U(0)); QPAtomic_WriteOffset(&qHeader->consumerHead, CONST64U(0)); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_Init -- * * Initializes a queue's state (head & tail pointers). * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void VMCIQueueHeader_Init(VMCIQueueHeader *qHeader, // IN/OUT: const VMCIHandle handle) // IN: { qHeader->handle = handle; VMCIQueueHeader_ResetPointers(qHeader); } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_FreeSpace -- * * Finds available free space in a produce queue to enqueue more * data or reports an error if queue pair corruption is detected. * * Results: * Free space size in bytes or an error code. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int64 VMCIQueueHeader_FreeSpace(const VMCIQueueHeader *produceQHeader, // IN: const VMCIQueueHeader *consumeQHeader, // IN: const uint64 produceQSize) // IN: { uint64 tail; uint64 head; uint64 freeSpace; tail = VMCIQueueHeader_ProducerTail(produceQHeader); head = VMCIQueueHeader_ConsumerHead(consumeQHeader); if (tail >= produceQSize || head >= produceQSize) { return VMCI_ERROR_INVALID_SIZE; } /* * Deduct 1 to avoid tail becoming equal to head which causes ambiguity. If * head and tail are equal it means that the queue is empty. 
*/ if (tail >= head) { freeSpace = produceQSize - (tail - head) - 1; } else { freeSpace = head - tail - 1; } return freeSpace; } /* *----------------------------------------------------------------------------- * * VMCIQueueHeader_BufReady -- * * VMCIQueueHeader_FreeSpace() does all the heavy lifting of * determing the number of free bytes in a Queue. This routine, * then subtracts that size from the full size of the Queue so * the caller knows how many bytes are ready to be dequeued. * * Results: * On success, available data size in bytes (up to MAX_INT64). * On failure, appropriate error code. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE int64 VMCIQueueHeader_BufReady(const VMCIQueueHeader *consumeQHeader, // IN: const VMCIQueueHeader *produceQHeader, // IN: const uint64 consumeQSize) // IN: { int64 freeSpace; freeSpace = VMCIQueueHeader_FreeSpace(consumeQHeader, produceQHeader, consumeQSize); if (freeSpace < VMCI_SUCCESS) { return freeSpace; } else { return consumeQSize - freeSpace - 1; } } /* * Defines for the VMCI traffic filter: * - VMCI_FA_<name> defines the filter action values * - VMCI_FP_<name> defines the filter protocol values * - VMCI_FD_<name> defines the direction values (guest or host) * - VMCI_FT_<name> are the type values (allow or deny) */ #define VMCI_FA_INVALID -1 #define VMCI_FA_ALLOW 0 #define VMCI_FA_DENY (VMCI_FA_ALLOW + 1) #define VMCI_FA_MAX (VMCI_FA_DENY + 1) #define VMCI_FP_INVALID -1 #define VMCI_FP_HYPERVISOR 0 #define VMCI_FP_QUEUEPAIR (VMCI_FP_HYPERVISOR + 1) #define VMCI_FP_DOORBELL (VMCI_FP_QUEUEPAIR + 1) #define VMCI_FP_DATAGRAM (VMCI_FP_DOORBELL + 1) #define VMCI_FP_STREAMSOCK (VMCI_FP_DATAGRAM + 1) #define VMCI_FP_ANY (VMCI_FP_STREAMSOCK + 1) #define VMCI_FP_MAX (VMCI_FP_ANY + 1) #define VMCI_FD_INVALID -1 #define VMCI_FD_GUEST 0 #define VMCI_FD_HOST (VMCI_FD_GUEST + 1) #define VMCI_FD_ANY (VMCI_FD_HOST + 1) #define VMCI_FD_MAX (VMCI_FD_ANY + 1) /* * 
The filter list tracks VMCI Id ranges for a given filter. */ typedef struct { uint32 len; VMCIIdRange *list; } VMCIFilterList; /* * The filter info is used to communicate the filter configuration * from the VMX to the host kernel. */ typedef struct { VA64 list; // List of VMCIIdRange uint32 len; // Length of list uint8 dir; // VMCI_FD_X uint8 proto; // VMCI_FP_X } VMCIFilterInfo; /* * In the host kernel, the ingoing and outgoing filters are * separated. The VMCIProtoFilters type captures all filters in one * direction. The VMCIFilters type captures all filters. */ typedef VMCIFilterList VMCIProtoFilters[VMCI_FP_MAX]; typedef VMCIProtoFilters VMCIFilters[VMCI_FD_MAX]; #if defined __cplusplus } // extern "C" #endif #endif // _VMCI_DEF_H_ vmhgfs-only/shared/compat_version.h 0000444 0000000 0000000 00000007363 13432725347 016515 0 ustar root root /********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_VERSION_H__ # define __COMPAT_VERSION_H__ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMKDRIVERS #include "includeCheck.h" #ifndef __linux__ # error "linux-version.h" #endif #include <linux/version.h> #ifndef KERNEL_VERSION # error KERNEL_VERSION macro is not defined, environment is busted #endif /* * Distinguish relevant classes of Linux kernels. * * The convention is that version X defines all * the KERNEL_Y symbols where Y <= X. * * XXX Do not add more definitions here. This way of doing things does not * scale, and we are going to phase it out soon --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 0) # define KERNEL_2_1 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) # define KERNEL_2_2 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 1) # define KERNEL_2_3_1 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 15) /* new networking */ # define KERNEL_2_3_15 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 25) /* new procfs */ # define KERNEL_2_3_25 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 29) /* even newer procfs */ # define KERNEL_2_3_29 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 43) /* softnet changes */ # define KERNEL_2_3_43 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 47) /* more softnet changes */ # define KERNEL_2_3_47 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 3, 99) /* name in netdevice struct is array and not pointer */ # define KERNEL_2_3_99 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 0) /* New 'owner' member at the beginning of struct file_operations */ # define KERNEL_2_4_0 #endif #if 
LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) /* New netif_rx_ni() --hpreg */ # define KERNEL_2_4_8 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 2) /* New kdev_t, major()/minor() API --hpreg */ # define KERNEL_2_5_2 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) /* New sk_alloc(), pte_offset_map()/pte_unmap() --hpreg */ # define KERNEL_2_5_5 #endif /* Linux kernel 3.0 can be called 2.6.40, and 3.1 can be 2.6.41... * Use COMPAT_LINUX_VERSION_CHECK_LT iff you need to compare running kernel to * versions 3.0 and above. * */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) /* Straight forward comparison if kernel version is 3.0.0 and beyond */ # define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (a, b, c) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 40) /* Use b of the check to calculate corresponding c of kernel * version to compare */ # define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) LINUX_VERSION_CODE < KERNEL_VERSION (2, 6, (b + 40)) #else /* This is anyways lesser than any 3.x versions */ # define COMPAT_LINUX_VERSION_CHECK_LT(a, b, c) 1 #endif #endif /* __COMPAT_VERSION_H__ */ vmhgfs-only/shared/compat_pgtable.h 0000444 0000000 0000000 00000005563 13432725347 016446 0 ustar root root /********************************************************* * Copyright (C) 2002-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PGTABLE_H__ # define __COMPAT_PGTABLE_H__ #if defined(CONFIG_PARAVIRT) && defined(CONFIG_HIGHPTE) # if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 21) # include <asm/paravirt.h> # undef paravirt_map_pt_hook # define paravirt_map_pt_hook(type, va, pfn) do {} while (0) # endif #endif #include <asm/pgtable.h> /* * p4d level appeared in 4.12. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) # define compat_p4d_offset(pgd, address) p4d_offset(pgd, address) # define compat_p4d_present(p4d) p4d_present(p4d) # define compat_p4d_large(p4d) p4d_large(p4d) # define compat_p4d_pfn(p4d) p4d_pfn(p4d) # define COMPAT_P4D_MASK P4D_MASK typedef p4d_t compat_p4d_t; #else # define compat_p4d_offset(pgd, address) (pgd) # define compat_p4d_present(p4d) (1) # define compat_p4d_large(p4d) (0) # define compat_p4d_pfn(p4d) INVALID_MPN /* Not used */ # define COMPAT_P4D_MASK 0 /* Not used */ typedef pgd_t compat_p4d_t; #endif /* pud_pfn did not exist before 3.8. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) # define pud_pfn(pud) INVALID_MPN #endif /* * Define VM_PAGE_KERNEL_EXEC for vmapping executable pages. * * On ia32 PAGE_KERNEL_EXEC was introduced in 2.6.8.1. Unfortunately it accesses * __PAGE_KERNEL_EXEC which is not exported for modules. So we use * __PAGE_KERNEL and just cut _PAGE_NX bit from it. * * For ia32 kernels before 2.6.8.1 we use PAGE_KERNEL directly, these kernels * do not have noexec support. * * On x86-64 situation is a bit better: they always supported noexec, but * before 2.6.8.1 flag was named PAGE_KERNEL_EXECUTABLE, and it was renamed * to PAGE_KERNEL_EXEC when ia32 got noexec too (see above). 
*/ #ifdef CONFIG_X86 #ifdef _PAGE_NX #define VM_PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL & ~_PAGE_NX) #else #define VM_PAGE_KERNEL_EXEC PAGE_KERNEL #endif #else #define VM_PAGE_KERNEL_EXEC PAGE_KERNEL_EXEC #endif #endif /* __COMPAT_PGTABLE_H__ */ vmhgfs-only/shared/compat_page.h 0000444 0000000 0000000 00000004663 13432725347 015744 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PAGE_H__ # define __COMPAT_PAGE_H__ #include <linux/mm.h> #include <asm/page.h> /* The pfn_to_page() API appeared in 2.5.14 and changed to function during 2.6.x */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) && !defined(pfn_to_page) # define pfn_to_page(_pfn) (mem_map + (_pfn)) # define page_to_pfn(_page) ((_page) - mem_map) #endif /* The virt_to_page() API appeared in 2.4.0 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) && !defined(virt_to_page) # define virt_to_page(_kvAddr) pfn_to_page(MAP_NR(_kvAddr)) #endif /* * The get_order() API appeared at some point in 2.3.x, and was then backported * in 2.2.17-21mdk and in the stock 2.2.18. 
Because we can only detect its * definition through makefile tricks, we provide our own for now --hpreg */ static inline int compat_get_order(unsigned long size) // IN { int order; size = (size - 1) >> (PAGE_SHIFT - 1); order = -1; do { size >>= 1; order++; } while (size); return order; } /* * BUG() was added to <asm/page.h> in 2.2.18, and was moved to <asm/bug.h> * in 2.5.58. * * XXX: Technically, this belongs in some sort of "compat_asm_page.h" file, but * since our compatibility wrappers don't distinguish between <asm/xxx.h> and * <linux/xxx.h>, putting it here is reasonable. */ #ifndef BUG #define BUG() do { \ printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \ __asm__ __volatile__(".byte 0x0f,0x0b"); \ } while (0) #endif #endif /* __COMPAT_PAGE_H__ */ vmhgfs-only/shared/includeCheck.h 0000444 0000000 0000000 00000007463 13432725350 016041 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * includeCheck.h -- * * Restrict include file use. 
* * In every .h file, define one or more of these * * INCLUDE_ALLOW_VMX * INCLUDE_ALLOW_USERLEVEL * INCLUDE_ALLOW_VMCORE * INCLUDE_ALLOW_MODULE * INCLUDE_ALLOW_VMKERNEL * INCLUDE_ALLOW_DISTRIBUTE * INCLUDE_ALLOW_VMK_MODULE * INCLUDE_ALLOW_VMKDRIVERS * INCLUDE_ALLOW_MKS * * Then include this file. * * Any file that has INCLUDE_ALLOW_DISTRIBUTE defined will potentially * be distributed in source form along with GPLed code. Ensure * that this is acceptable. */ /* * Declare a VMCORE-only variable to help classify object * files. The variable goes in the common block and does * not create multiple definition link-time conflicts. */ #if defined VMCORE && defined VMX86_DEVEL && defined VMX86_DEBUG && \ defined linux && !defined MODULE && \ !defined COMPILED_WITH_VMCORE #define COMPILED_WITH_VMCORE compiled_with_vmcore #ifdef ASM .comm compiled_with_vmcore, 0 #else asm(".comm compiled_with_vmcore, 0"); #endif /* ASM */ #endif #if defined VMCORE && \ !(defined VMX86_VMX || defined VMM || \ defined MONITOR_APP || defined VMMON) #error "Makefile problem: VMCORE without VMX86_VMX or \ VMM or MONITOR_APP or MODULE." #endif #if defined VMCORE && !defined INCLUDE_ALLOW_VMCORE #error "The surrounding include file is not allowed in vmcore." #endif #undef INCLUDE_ALLOW_VMCORE #if defined VMX86_VMX && !defined VMCORE && \ !defined INCLUDE_ALLOW_VMX && !defined INCLUDE_ALLOW_USERLEVEL && \ !defined INCLUDE_ALLOW_MKS #error "The surrounding include file is not allowed in the VMX." #endif #undef INCLUDE_ALLOW_VMX #if defined USERLEVEL && !defined VMX86_VMX && !defined VMCORE && \ !defined INCLUDE_ALLOW_USERLEVEL && !defined INCLUDE_ALLOW_MKS #error "The surrounding include file is not allowed at userlevel." #endif #undef INCLUDE_ALLOW_USERLEVEL #if defined MODULE && !defined VMKERNEL_MODULE && \ !defined VMMON && !defined INCLUDE_ALLOW_MODULE #error "The surrounding include file is not allowed in driver modules." 
#endif #undef INCLUDE_ALLOW_MODULE #if defined VMMON && !defined INCLUDE_ALLOW_VMMON #error "The surrounding include file is not allowed in vmmon." #endif #undef INCLUDE_ALLOW_VMMON #if defined VMKERNEL && !defined INCLUDE_ALLOW_VMKERNEL #error "The surrounding include file is not allowed in the vmkernel." #endif #undef INCLUDE_ALLOW_VMKERNEL #if defined GPLED_CODE && !defined INCLUDE_ALLOW_DISTRIBUTE #error "The surrounding include file is not allowed in GPL code." #endif #undef INCLUDE_ALLOW_DISTRIBUTE #if defined VMKERNEL_MODULE && !defined VMKERNEL && \ !defined INCLUDE_ALLOW_VMK_MODULE && !defined INCLUDE_ALLOW_VMKDRIVERS #error "The surrounding include file is not allowed in vmkernel modules." #endif #undef INCLUDE_ALLOW_VMK_MODULE #undef INCLUDE_ALLOW_VMKDRIVERS #if defined INCLUDE_ALLOW_MKS && !(defined COREMKS) #error "The surrounding include file is not allowed outside of the MKS." #endif #undef INCLUDE_ALLOW_MKS vmhgfs-only/shared/compat_pci.h 0000444 0000000 0000000 00000005152 13432725347 015575 0 ustar root root /********************************************************* * Copyright (C) 1999 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * compat_pci.h: PCI compatibility wrappers. 
*/ #ifndef __COMPAT_PCI_H__ #define __COMPAT_PCI_H__ #include "compat_ioport.h" #include <linux/pci.h> #ifndef DMA_BIT_MASK # define DMA_BIT_MASK(n) DMA_##n##BIT_MASK #endif /* * Power Management related compat wrappers. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 10) # define compat_pci_save_state(pdev) pci_save_state((pdev), NULL) # define compat_pci_restore_state(pdev) pci_restore_state((pdev), NULL) #else # define compat_pci_save_state(pdev) pci_save_state((pdev)) # define compat_pci_restore_state(pdev) pci_restore_state((pdev)) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 11) # define pm_message_t u32 # define compat_pci_choose_state(pdev, state) (state) # define PCI_D0 0 # define PCI_D3hot 3 #else # define compat_pci_choose_state(pdev, state) pci_choose_state((pdev), (state)) #endif /* 2.6.14 changed the PCI shutdown callback */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14) # define COMPAT_PCI_SHUTDOWN(func) .driver = { .shutdown = (func), } # define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct device *(var)) # define COMPAT_PCI_TO_DEV(dev) (to_pci_dev(dev)) #else # define COMPAT_PCI_SHUTDOWN(func) .shutdown = (func) # define COMPAT_PCI_DECLARE_SHUTDOWN(func, var) (func)(struct pci_dev *(var)) # define COMPAT_PCI_TO_DEV(dev) (dev) #endif /* 2.6.26 introduced the device_set_wakeup_enable() function */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) # define compat_device_set_wakeup_enable(dev, val) do {} while(0) #else # define compat_device_set_wakeup_enable(dev, val) \ device_set_wakeup_enable(dev, val) #endif #endif /* __COMPAT_PCI_H__ */ vmhgfs-only/shared/kernelStubs.h 0000444 0000000 0000000 00000021124 13432725330 015745 0 ustar root root /********************************************************* * Copyright (C) 2006-2018 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * kernelStubs.h * * KernelStubs implements some userspace library functions in terms * of kernel functions to allow library userspace code to be used in a * kernel. */ #ifndef __KERNELSTUBS_H__ #define __KERNELSTUBS_H__ #define KRNL_STUBS_DRIVER_TYPE_POSIX 1 #define KRNL_STUBS_DRIVER_TYPE_GDI 2 #define KRNL_STUBS_DRIVER_TYPE_WDM 3 #define KRNL_STUBS_DRIVER_TYPE_NDIS 4 // For now (vsphere-2015), choose a good default. Later we'll modify all the // build files using KernelStubs to set this. #ifndef KRNL_STUBS_DRIVER_TYPE # if defined(_WIN32) # define KRNL_STUBS_DRIVER_TYPE KRNL_STUBS_DRIVER_TYPE_WDM # else # define KRNL_STUBS_DRIVER_TYPE KRNL_STUBS_DRIVER_TYPE_POSIX # endif #endif #ifdef linux # ifndef __KERNEL__ # error "__KERNEL__ is not defined" # endif # include "driver-config.h" // Must be included before any other header files # include "vm_basic_types.h" # include <linux/kernel.h> # include <linux/string.h> #elif defined(_WIN32) # define _CRT_ALLOCATION_DEFINED // prevent malloc.h from defining malloc et. 
all # if KRNL_STUBS_DRIVER_TYPE == KRNL_STUBS_DRIVER_TYPE_GDI # include <d3d9.h> # include <winddi.h> # include <stdio.h> # include "vm_basic_types.h" # include "vm_basic_defs.h" # include "vm_assert.h" # elif KRNL_STUBS_DRIVER_TYPE == KRNL_STUBS_DRIVER_TYPE_NDIS # include <ntddk.h> # include <stdio.h> /* for _vsnprintf, vsprintf */ # include <stdarg.h> /* for va_start stuff */ # include "vm_basic_defs.h" # include "vm_assert.h" # include "kernelStubsFloorFixes.h" #pragma warning(disable:4201) // unnamed struct/union # include <ndis.h> # elif KRNL_STUBS_DRIVER_TYPE == KRNL_STUBS_DRIVER_TYPE_WDM # include "vm_basic_types.h" # if defined(NTDDI_WINXP) && (NTDDI_VERSION >= NTDDI_WINXP) # include <wdm.h> /* kernel memory APIs, DbgPrintEx */ # else # include <ntddk.h> /* kernel memory APIs */ # endif # include <stdio.h> /* for _vsnprintf, vsprintf */ # include <stdarg.h> /* for va_start stuff */ # include <stdlib.h> /* for min macro. */ # include "vm_basic_defs.h" # include "vm_assert.h" /* Our assert macros */ # include "kernelStubsFloorFixes.h" # else # error Type KRNL_STUBS_DRIVER_TYPE must be defined. # endif #elif defined(__FreeBSD__) # include "vm_basic_types.h" # ifndef _KERNEL # error "_KERNEL is not defined" # endif # include <sys/types.h> # include <sys/malloc.h> # include <sys/param.h> # include <sys/kernel.h> # include <machine/stdarg.h> # include <sys/libkern.h> #elif defined(__APPLE__) # include "vm_basic_types.h" # ifndef KERNEL # error "KERNEL is not defined" # endif # include <stdarg.h> # include <string.h> # elif defined(sun) # include "vm_basic_types.h" # include <sys/types.h> # include <sys/varargs.h> #endif #include "kernelStubsSal.h" /* * Function Prototypes */ #if defined(__linux__) || defined(__APPLE__) || defined (sun) # ifdef linux /* if (linux) { */ char *strdup(const char *source); # endif /* Shared between Linux and Apple kernel stubs. 
*/ void *malloc(size_t size); void free(void *mem); void *calloc(size_t num, size_t len); void *realloc(void *ptr, size_t newSize); #elif defined(_WIN32) /* } else if (_WIN32) { */ _Ret_allocates_malloc_mem_opt_bytecap_(_Size) _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) _CRTNOALIAS _CRTRESTRICT void * __cdecl malloc( _In_ size_t _Size); _Ret_allocates_malloc_mem_opt_bytecount_(_Count*_Size) _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) _CRTNOALIAS _CRTRESTRICT void * __cdecl calloc( _In_ size_t _Count, _In_ size_t _Size); _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) _CRTNOALIAS void __cdecl free( _In_frees_malloc_mem_opt_ void * _Memory); _Success_(return != 0) _When_(_Memory != 0, _Ret_reallocates_malloc_mem_opt_newbytecap_oldbytecap_(_NewSize, ((uintptr_t*)_Memory)[-1])) _When_(_Memory == 0, _Ret_reallocates_malloc_mem_opt_newbytecap_(_NewSize)) _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) _CRTNOALIAS _CRTRESTRICT void * __cdecl realloc( _In_reallocates_malloc_mem_opt_oldptr_ void * _Memory, _In_ size_t _NewSize); _Success_(return != 0) _Ret_allocates_malloc_mem_opt_z_ _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) _CRTIMP char * __cdecl _strdup_impl( _In_opt_z_ const char * _Src); #define strdup _strdup_impl #elif defined(__FreeBSD__) /* } else if (FreeBSD) { */ /* Kernel memory on FreeBSD is tagged for statistics and sanity checking. */ MALLOC_DECLARE(M_VMWARE_TEMP); /* * On FreeBSD, the general memory allocator for both userland and the kernel is named * malloc, but the kernel malloc() takes more arguments. The following alias & macros * work around this, to provide the standard malloc() API for userspace code that is * being used in the kernel. 
*/ # undef malloc static INLINE void * __compat_malloc(unsigned long size, struct malloc_type *type, int flags) { return malloc(size, type, flags); } # define malloc(size) __compat_malloc(size, M_VMWARE_TEMP, M_NOWAIT) # define calloc(count, size) __compat_malloc((count) * (size), \ M_VMWARE_TEMP, M_NOWAIT|M_ZERO) # define realloc(buf, size) realloc(buf, size, M_VMWARE_TEMP, M_NOWAIT) # define free(buf) free(buf, M_VMWARE_TEMP) # define strchr(s,c) index(s,c) # define strrchr(s,c) rindex(s,c) #endif /* } */ _Ret_writes_z_(maxSize) char *Str_Strcpy( _Out_z_cap_(maxSize) char *buf, _In_z_ const char *src, _In_ size_t maxSize); _Ret_writes_z_(maxSize) char *Str_Strcat( _Inout_z_cap_(maxSize) char *buf, _In_z_ const char *src, _In_ size_t maxSize); _Success_(return >= 0) int Str_Sprintf( _Out_z_cap_(maxSize) _Post_z_count_(return+1) char *buf, _In_ size_t maxSize, _In_z_ _Printf_format_string_ const char *fmt, ...) PRINTF_DECL(3, 4); _Success_(return != -1) int Str_Vsnprintf( _Out_z_cap_(size) _Post_z_count_(return+1) char *str, _In_ size_t size, _In_z_ _Printf_format_string_ const char *format, _In_ va_list ap) PRINTF_DECL(3, 0); _Success_(return != 0) _When_(length != 0, _Ret_allocates_malloc_mem_opt_z_bytecount_(*length)) _When_(length == 0, _Ret_allocates_malloc_mem_opt_z_) _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) char *Str_Vasprintf( _Out_opt_ size_t *length, _In_z_ _Printf_format_string_ const char *format, _In_ va_list arguments) PRINTF_DECL(2, 0); _Success_(return != 0) _When_(length != 0, _Ret_allocates_malloc_mem_opt_z_bytecount_(*length)) _When_(length == 0, _Ret_allocates_malloc_mem_opt_z_) _When_windrv_(_IRQL_requires_max_(DISPATCH_LEVEL)) char *Str_Asprintf( _Out_opt_ size_t *length, _In_z_ _Printf_format_string_ const char *format, ...) 
PRINTF_DECL(2, 3); #ifdef _WIN32 #pragma warning(push) #pragma warning(disable: 28301) // Suppress complaint that first declaration lacked annotations #endif // For now (vsphere-2015), we don't implement Panic, Warning, or Debug in the // GDI case. #if (KRNL_STUBS_DRIVER_TYPE != KRNL_STUBS_DRIVER_TYPE_GDI) &&\ (KRNL_STUBS_DRIVER_TYPE != KRNL_STUBS_DRIVER_TYPE_NDIS) /* * Stub functions we provide. */ #ifdef _WIN32 NORETURN #endif void Panic( _In_z_ _Printf_format_string_ const char *fmt, ...) PRINTF_DECL(1, 2); void Warning( _In_z_ _Printf_format_string_ const char *fmt, ...) PRINTF_DECL(1, 2); /* * Functions the driver must implement for the stubs. */ EXTERN void Debug( _In_z_ _Printf_format_string_ const char *fmt, ...) PRINTF_DECL(1, 2); #endif // KRNL_STUBS_DRIVER_TYPE != KRNL_STUBS_DRIVER_TYPE_GDI #ifdef _WIN32 #pragma warning(pop) #endif #endif /* __KERNELSTUBS_H__ */ vmhgfs-only/shared/compat_log2.h 0000444 0000000 0000000 00000003672 13432725347 015672 0 ustar root root /********************************************************* * Copyright (C) 2011 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_LOG2_H__ # define __COMPAT_LOG2_H__ #ifndef LINUX_VERSION_CODE # error "Include compat_version.h before compat_log2.h" #endif /* linux/log2.h was introduced in 2.6.20. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 19) # include <linux/log2.h> #endif /* * is_power_of_2 was introduced in 2.6.21. This implementation is almost * identical to the one found there. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 20) #define compat_is_power_of_2(n) is_power_of_2(n) #else static inline __attribute__((const)) int compat_is_power_of_2(unsigned long n) { return (n != 0 && ((n && (n - 1)) == 0)); } #endif /* * rounddown_power_of_two was introduced in 2.6.24. This implementation is * similar to the one in log2.h but with input of int instead of long to * avoid more version related checks for fls_long(). */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) #define compat_rounddown_pow_of_two(n) rounddown_pow_of_two(n) #else static inline __attribute__((const)) unsigned int compat_rounddown_pow_of_two(unsigned int n) { return 1U << (fls(n) -1); } #endif #endif /* __COMPAT_LOG2_H__ */ vmhgfs-only/shared/compat_cred.h 0000444 0000000 0000000 00000003244 13432725347 015737 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_CRED_H__ # define __COMPAT_CRED_H__ /* * Include linux/cred.h via linux/sched.h - it is not nice, but * as cpp does not have #ifexist... */ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) #include <linux/sched.h> #else #include <linux/cred.h> #endif #if !defined(current_fsuid) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 29) #define current_uid() (current->uid) #define current_euid() (current->euid) #define current_fsuid() (current->fsuid) #define current_gid() (current->gid) #define current_egid() (current->egid) #define current_fsgid() (current->fsgid) #endif #if !defined(cap_set_full) /* cap_set_full was removed in kernel version 3.0-rc4. */ #define cap_set_full(_c) do { (_c) = CAP_FULL_SET; } while (0) #endif #if !defined(GLOBAL_ROOT_UID) #define GLOBAL_ROOT_UID (0) #endif #endif /* __COMPAT_CRED_H__ */ vmhgfs-only/shared/compat_ioport.h 0000444 0000000 0000000 00000004041 13432725347 016332 0 ustar root root /********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_IOPORT_H__ # define __COMPAT_IOPORT_H__ #include <linux/ioport.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0) static inline void * compat_request_region(unsigned long start, unsigned long len, const char *name) { if (check_region(start, len)) { return NULL; } request_region(start, len, name); return (void*)1; } #else #define compat_request_region(start, len, name) request_region(start, len, name) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 7) /* mmap io support starts from 2.3.7, fail the call for kernel prior to that */ static inline void * compat_request_mem_region(unsigned long start, unsigned long len, const char *name) { return NULL; } static inline void compat_release_mem_region(unsigned long start, unsigned long len) { return; } #else #define compat_request_mem_region(start, len, name) request_mem_region(start, len, name) #define compat_release_mem_region(start, len) release_mem_region(start, len) #endif /* these two macro defs are needed by compat_pci_request_region */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 15) # define IORESOURCE_IO 0x00000100 # define IORESOURCE_MEM 0x00000200 #endif #endif /* __COMPAT_IOPORT_H__ */ vmhgfs-only/shared/vm_atomic.h 0000444 0000000 0000000 00000305435 13432725350 015436 0 ustar root root /********************************************************* * Copyright (C) 1998-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_atomic.h -- * * Atomic power * * Note: Only partially tested on ARM processors: Works for View Open * Client, which shouldn't have threads, and ARMv8 processors. * * In ARM, GCC intrinsics (__sync*) compile but might not * work, while MS intrinsics (_Interlocked*) do not compile. */ #ifndef _ATOMIC_H_ #define _ATOMIC_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_types.h" #include "vm_assert.h" #if defined __cplusplus extern "C" { #endif /* * There are two concepts involved when dealing with atomic accesses: * 1. Atomicity of the access itself * 2. Ordering of the access with respect to other reads&writes (from the view * of other processors/devices). * * Two examples help to clarify #2: * a. Inc: A caller implementing a simple independent global event counter * might not care if the compiler or processor visibly reorders the * increment around other memory accesses. * b. Dec: A caller implementing a reference count absolutely *doesn't* want * the compiler or processor to visibly reordering writes after that * decrement: if that happened, the program could then end up writing * to memory that was freed by another processor. 
* * C11 has standardized a good model for expressing these orderings when doing * atomics. It defines three *tiers* of ordering: * 1. Sequential Consistency (every processor sees the same total order of * events) * * 2. Acquire/Release ordering (roughly, everybody can agree previous events * have completed, but they might disagree on the ordering of previous * independent events). * * The relative ordering provided by this tier is sufficient for common * locking and initialization activities, but is insufficient for unusual * synchronization schemes (e.g. IRIW aka Independent Read Independent * Write designs such Dekker's algorithm, Peterson's algorithm, etc.) * * In other words, this tier is close in behavior to Sequential Consistency * in much the same way a General-Relativity universe is close to a * Newtonian universe. * 3. Relaxed (i.e unordered/unfenced) * * In C11 standard's terminology for atomic memory ordering, * - in case (a) we want "relaxed" ordering for perf and, * - in case (b) we want "sequentially consistent" ordering (or perhaps the * only slightly weaker "release" ordering) for correctness. * * There are standardized mappings of operations to orderings for every * processor architecture. See * - https://www.cl.cam.ac.uk/~pes20/cpp/cpp0xmappings.html * - http://preshing.com/20120913/acquire-and-release-semantics/ * * In this file: * 1. all RMW (Read/Modify/Write) operations are sequentially consistent. * This includes operations like Atomic_IncN, Atomic_ReadIfEqualWriteN, * Atomic_ReadWriteN, etc. * 2. all R and W operations are relaxed. This includes operations like * Atomic_WriteN, Atomic_ReadN, Atomic_TestBitN, etc. * * The below routines of course ensure both the CPU and compiler honor the * ordering constraint. * * Notes: * 1. Since R-only and W-only operations do not provide ordering, callers * using them for synchronizing operations like double-checked * initialization or releasing spinlocks must provide extra barriers. * 2. 
This implementation of Atomic operations is suboptimal. On x86,simple * reads and writes have acquire/release semantics at the hardware level. * On arm64, we have separate instructions for sequentially consistent * reads and writes (the same instructions are used for acquire/release). * Neither of these are exposed for R-only or W-only callers. * * For further details on x86 and ARM memory ordering see * https://wiki.eng.vmware.com/ARM/MemoryOrdering. */ #ifdef VM_ARM_64 # include "vm_atomic_arm64_begin.h" #endif /* Basic atomic types: 8, 16, 32, 64 and 128 bits */ typedef struct Atomic_uint8 { volatile uint8 value; } Atomic_uint8 ALIGNED(1); typedef struct Atomic_uint16 { volatile uint16 value; } Atomic_uint16 ALIGNED(2); typedef struct Atomic_uint32 { volatile uint32 value; } Atomic_uint32 ALIGNED(4); typedef struct Atomic_uint64 { volatile uint64 value; } Atomic_uint64 ALIGNED(8); #if defined __GNUC__ && defined VM_64BIT && \ (defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 || defined VM_ARM_64) typedef struct Atomic_uint128 { volatile uint128 value; } Atomic_uint128 ALIGNED(16); #endif /* * Prototypes for msft atomics. These are defined & inlined by the * compiler so no function definition is needed. The prototypes are * needed for C++. * * The declarations for the intrinsic functions were taken from ntddk.h * in the DDK. The declarations must match otherwise the 64-bit C++ * compiler will complain about second linkage of the intrinsic functions. * We define the intrinsic using the basic types corresponding to the * Windows typedefs. This avoids having to include windows header files * to get to the windows types. 
*/ #if defined _MSC_VER && !defined BORA_NO_WIN32_INTRINS #ifdef __cplusplus extern "C" { #endif long _InterlockedExchange(long volatile*, long); long _InterlockedCompareExchange(long volatile*, long, long); long _InterlockedExchangeAdd(long volatile*, long); long _InterlockedDecrement(long volatile*); long _InterlockedIncrement(long volatile*); __int64 _InterlockedCompareExchange64(__int64 volatile*, __int64, __int64); #pragma intrinsic(_InterlockedExchange, _InterlockedCompareExchange) #pragma intrinsic(_InterlockedExchangeAdd, _InterlockedDecrement) #pragma intrinsic(_InterlockedIncrement) #pragma intrinsic(_InterlockedCompareExchange64) # if _MSC_VER >= 1600 char _InterlockedExchange8(char volatile *, char); char _InterlockedCompareExchange8(char volatile *, char, char); #pragma intrinsic(_InterlockedCompareExchange8, _InterlockedCompareExchange8) #endif #if defined VM_X86_64 long _InterlockedAnd(long volatile*, long); __int64 _InterlockedAnd64(__int64 volatile*, __int64); long _InterlockedOr(long volatile*, long); __int64 _InterlockedOr64(__int64 volatile*, __int64); long _InterlockedXor(long volatile*, long); __int64 _InterlockedXor64(__int64 volatile*, __int64); __int64 _InterlockedExchangeAdd64(__int64 volatile*, __int64); __int64 _InterlockedIncrement64(__int64 volatile*); __int64 _InterlockedDecrement64(__int64 volatile*); __int64 _InterlockedExchange64(__int64 volatile*, __int64); #if !defined _WIN64 #pragma intrinsic(_InterlockedAnd, _InterlockedAnd64) #pragma intrinsic(_InterlockedOr, _InterlockedOr64) #pragma intrinsic(_InterlockedXor, _InterlockedXor64) #pragma intrinsic(_InterlockedExchangeAdd64, _InterlockedIncrement64) #pragma intrinsic(_InterlockedDecrement64, _InterlockedExchange64) #endif /* !_WIN64 */ #endif /* __x86_64__ */ #ifdef __cplusplus } #endif #endif /* _MSC_VER */ #if defined __arm__ /* * LDREX without STREX or CLREX may cause problems in environments where the * context switch may not clear the reference monitor - according ARM 
manual * the reference monitor should be cleared after a context switch, but some * may not like Linux kernel's non-preemptive context switch path. So use of * ARM routines in kernel code may not be safe. */ # if defined __ARM_ARCH_7__ || defined __ARM_ARCH_7A__ || \ defined __ARM_ARCH_7R__|| defined __ARM_ARCH_7M__ # define VM_ARM_V7 # ifdef __KERNEL__ # warning LDREX/STREX may not be safe in linux kernel, since it \ does not issue CLREX on context switch (as of 2011-09-29). # endif # else # error Only ARMv7 extends the synchronization primitives ldrex/strex. \ For the lower ARM version, please implement the atomic functions \ by kernel APIs. # endif #endif /* Data Memory Barrier */ #ifdef VM_ARM_V7 #define dmb() __asm__ __volatile__("dmb" : : : "memory") #endif /* Convert a volatile uint32 to Atomic_uint32. */ static INLINE Atomic_uint32 * Atomic_VolatileToAtomic32(volatile uint32 *var) // IN: { return (Atomic_uint32 *)var; } #define Atomic_VolatileToAtomic Atomic_VolatileToAtomic32 /* Convert a volatile uint64 to Atomic_uint64. */ static INLINE Atomic_uint64 * Atomic_VolatileToAtomic64(volatile uint64 *var) // IN: { return (Atomic_uint64 *)var; } /* * The Read/Modify/Write operations on x86/x64 are all written using the * "memory" constraint. This is to ensure the compiler treats the operation as * a full barrier, flushing any pending/cached state currently residing in * registers. */ #if defined _MSC_VER && _MSC_VER < 1600 && defined __x86_64__ uint8 VMWInterlockedExchange8(uint8 volatile *ptr, uint8 val); uint8 VMWInterlockedCompareExchange8(uint8 volatile *ptr, uint8 newVal, uint8 oldVal); #endif #if defined __GNUC__ && defined VM_ARM_32 /* Force the link step to fail for unimplemented functions. */ extern int AtomicUndefined(void const *); #endif /* *----------------------------------------------------------------------------- * * Atomic_ReadIfEqualWrite128 -- * * Compare and exchange a 16 byte tuple. 
* * Results: * old value * * Side effects: * None * *----------------------------------------------------------------------------- */ #if defined __GNUC__ && defined VM_64BIT && \ (defined __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 || defined VM_ARM_64) static INLINE uint128 Atomic_ReadIfEqualWrite128(Atomic_uint128 *ptr, // IN/OUT uint128 oldVal, // IN uint128 newVal) // IN { #ifdef __GCC_HAVE_SYNC_COMPARE_AND_SWAP_16 return __sync_val_compare_and_swap(&ptr->value, oldVal, newVal); #elif defined VM_ARM_64 union { uint128 raw; struct { uint64 lo; uint64 hi; }; } res, _old = { oldVal }, _new = { newVal }; uint32 failed; SMP_RW_BARRIER_RW(); __asm__ __volatile__( "1: ldxp %x0, %x1, %3 \n\t" " cmp %x0, %x4 \n\t" " ccmp %x1, %x5, #0, eq \n\t" " b.ne 2f \n\t" " stxp %w2, %x6, %x7, %3 \n\t" " cbnz %w2, 1b \n\t" "2: \n\t" : "=&r" (res.lo), "=&r" (res.hi), "=&r" (failed), "+Q" (ptr->value) : "r" (_old.lo), "r" (_old.hi), "r" (_new.lo), "r" (_new.hi) : "cc" ); SMP_RW_BARRIER_RW(); return res.raw; #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Read8 -- * * Read the value of the specified object atomically. * * Results: * The value of the atomic variable. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_Read8(Atomic_uint8 const *var) // IN: { uint8 val; #if defined __GNUC__ && defined VM_ARM_32 val = AtomicUndefined(var); #elif defined VM_ARM_64 val = _VMATOM_X(R, 8, &var->value); #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) __asm__ __volatile__( "movb %1, %0" : "=q" (val) : "m" (var->value) ); #elif defined _MSC_VER val = var->value; #else #error No compiler defined for Atomic_Read8 #endif return val; } /* *----------------------------------------------------------------------------- * * Atomic_ReadWrite8 -- * * Read followed by write. * * Results: * The value of the atomic variable before the write. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadWrite8(Atomic_uint8 *var, // IN/OUT: uint8 val) // IN: { #if defined __GNUC__ && defined VM_ARM_32 return AtomicUndefined(var + val); #elif defined VM_ARM_64 return _VMATOM_X(RW, 8, TRUE, &var->value, val); #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) __asm__ __volatile__( "xchgb %0, %1" : "=q" (val), "+m" (var->value) : "0" (val) : "memory" ); return val; #elif defined _MSC_VER && _MSC_VER >= 1600 return _InterlockedExchange8((volatile char *)&var->value, val); #elif defined _MSC_VER && defined __i386__ #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm movzx eax, val __asm mov ebx, var __asm xchg [ebx]Atomic_uint8.value, al } #pragma warning(pop) #elif defined _MSC_VER && defined __x86_64__ return VMWInterlockedExchange8(&var->value, val); #else #error No compiler defined for Atomic_ReadWrite8 #endif } /* *----------------------------------------------------------------------------- * * Atomic_Write8 -- * * Write the specified value to the specified object atomically. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Write8(Atomic_uint8 *var, // IN/OUT: uint8 val) // IN: { #if defined __GNUC__ && defined VM_ARM_32 AtomicUndefined(var + val); #elif defined VM_ARM_64 _VMATOM_X(W, 8, &var->value, val); #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) __asm__ __volatile__( "movb %1, %0" : "=m" (var->value) : "qn" (val) ); #elif defined _MSC_VER var->value = val; #else #error No compiler defined for Atomic_Write8 #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadIfEqualWrite8 -- * * Compare exchange: Read variable, if equal to oldVal, write newVal. 
* * Results: * The value of the atomic variable before the write. * * Side effects: * The variable may be modified. * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadIfEqualWrite8(Atomic_uint8 *var, // IN/OUT: uint8 oldVal, // IN: uint8 newVal) // IN: { #if defined __GNUC__ && defined VM_ARM_32 return AtomicUndefined(var + oldVal + newVal); #elif defined VM_ARM_64 return _VMATOM_X(RIFEQW, 8, TRUE, &var->value, oldVal, newVal); #elif defined __GNUC__ && (defined __i386__ || defined __x86_64__) uint8 val; __asm__ __volatile__( "lock; cmpxchgb %2, %1" : "=a" (val), "+m" (var->value) : "q" (newVal), "0" (oldVal) : "cc", "memory" ); return val; #elif defined _MSC_VER && _MSC_VER >= 1600 return _InterlockedCompareExchange8((volatile char *)&var->value, newVal, oldVal); #elif defined _MSC_VER && defined __i386__ #pragma warning(push) #pragma warning(disable : 4035) // disable no-return warning { __asm mov al, oldVal __asm mov ebx, var __asm mov cl, newVal __asm lock cmpxchg [ebx]Atomic_uint8.value, cl __asm movzx eax, al // eax is the return value, this is documented to work - edward } #pragma warning(pop) #elif defined _MSC_VER && defined __x86_64__ return VMWInterlockedCompareExchange8(&var->value, newVal, oldVal); #else #error No compiler defined for Atomic_ReadIfEqualWrite8 #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadAnd8 -- * * Atomic read (returned), bitwise AND with a value, write. * * Results: * The value of the variable before the operation. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadAnd8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { uint8 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 8, TRUE, &var->value, and, val); #else do { res = Atomic_Read8(var); } while (res != Atomic_ReadIfEqualWrite8(var, res, res & val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_And8 -- * * Atomic read, bitwise AND with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_And8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { #if defined VM_ARM_64 _VMATOM_X(OP, 8, TRUE, &var->value, and, val); #else (void)Atomic_ReadAnd8(var, val); #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadOr8 -- * * Atomic read (returned), bitwise OR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadOr8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { uint8 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 8, TRUE, &var->value, orr, val); #else do { res = Atomic_Read8(var); } while (res != Atomic_ReadIfEqualWrite8(var, res, res | val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_Or8 -- * * Atomic read, bitwise OR with a value, write. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Or8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { #if defined VM_ARM_64 _VMATOM_X(OP, 8, TRUE, &var->value, orr, val); #else (void)Atomic_ReadOr8(var, val); #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadXor8 -- * * Atomic read (returned), bitwise XOR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadXor8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { uint8 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 8, TRUE, &var->value, eor, val); #else do { res = Atomic_Read8(var); } while (res != Atomic_ReadIfEqualWrite8(var, res, res ^ val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_Xor8 -- * * Atomic read, bitwise XOR with a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Xor8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { #if defined VM_ARM_64 _VMATOM_X(OP, 8, TRUE, &var->value, eor, val); #else (void)Atomic_ReadXor8(var, val); #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadAdd8 -- * * Atomic read (returned), add a value, write. * * Results: * The value of the variable before the operation. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint8 Atomic_ReadAdd8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { uint8 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 8, TRUE, &var->value, add, val); #else do { res = Atomic_Read8(var); } while (res != Atomic_ReadIfEqualWrite8(var, res, res + val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_Add8 -- * * Atomic read, add a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Add8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { #if defined VM_ARM_64 _VMATOM_X(OP, 8, TRUE, &var->value, add, val); #else (void)Atomic_ReadAdd8(var, val); #endif } /* *----------------------------------------------------------------------------- * * Atomic_Sub8 -- * * Atomic read, subtract a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Sub8(Atomic_uint8 *var, // IN/OUT uint8 val) // IN { #if defined VM_ARM_64 _VMATOM_X(OP, 8, TRUE, &var->value, sub, val); #else Atomic_Add8(var, -val); #endif } /* *----------------------------------------------------------------------------- * * Atomic_Inc8 -- * * Atomic read, increment, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Inc8(Atomic_uint8 *var) // IN/OUT { Atomic_Add8(var, 1); } /* *----------------------------------------------------------------------------- * * Atomic_Dec8 -- * * Atomic read, decrement, write. 
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Dec8(Atomic_uint8 *var) // IN/OUT
{
   Atomic_Sub8(var, 1);
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadInc8 --
 *
 *      Atomic read (returned), increment, write.
 *
 * Results:
 *      The value of the variable before the operation.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint8
Atomic_ReadInc8(Atomic_uint8 *var) // IN/OUT
{
   return Atomic_ReadAdd8(var, 1);
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadDec8 --
 *
 *      Atomic read (returned), decrement, write.
 *
 * Results:
 *      The value of the variable before the operation.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint8
Atomic_ReadDec8(Atomic_uint8 *var) // IN/OUT
{
   /* (uint8)-1 == 0xFF: adding it decrements modulo 256. */
   return Atomic_ReadAdd8(var, (uint8)-1);
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Read32 --
 *
 *      Read
 *
 * Results:
 *      The value of the atomic variable.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint32
Atomic_Read32(Atomic_uint32 const *var) // IN
{
   uint32 value;

#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   /* These environments require natural alignment for atomicity. */
   ASSERT(((uintptr_t)var % 4) == 0);
#endif

#if defined __GNUC__
   /*
    * Use inline assembler to force using a single load instruction to
    * ensure that the compiler doesn't split a transfer operation into multiple
    * instructions.
    */

#if defined VM_ARM_32
   __asm__ __volatile__(
      "ldr %0, [%1]"
      : "=r" (value)
      : "r" (&var->value)
   );
#elif defined VM_ARM_64
   value = _VMATOM_X(R, 32, &var->value);
#else
   __asm__ __volatile__(
      "mov %1, %0"
      : "=r" (value)
      : "m" (var->value)
   );
#endif
#elif defined _MSC_VER
   /*
    * Microsoft docs guarantee simple reads and writes to properly
    * aligned 32-bit variables use only a single instruction.
    * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
    */

   value = var->value;
#else
#error No compiler defined for Atomic_Read
#endif

   return value;
}
#define Atomic_Read Atomic_Read32


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadWrite32 --
 *
 *      Read followed by write
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint32
Atomic_ReadWrite32(Atomic_uint32 *var, // IN/OUT
                   uint32 val)         // IN
{
#if defined __GNUC__
#ifdef VM_ARM_V7
   register volatile uint32 retVal;
   register volatile uint32 res;

   /* dmb() before and after gives the exchange full-barrier semantics. */
   dmb();

   /* LL/SC loop: retry until the store-exclusive succeeds. */
   __asm__ __volatile__(
   "1: ldrex %[retVal], [%[var]] \n\t"
      "strex %[res], %[val], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [retVal] "=&r" (retVal), [res] "=&r" (res)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(RW, 32, TRUE, &var->value, val);
#else /* VM_X86_ANY */
   /* Checked against the Intel manual and GCC --walken */
   __asm__ __volatile__(
      "xchgl %0, %1"
      : "=r" (val),
        "+m" (var->value)
      : "0" (val)
      : "memory"
   );
   return val;
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
   return _InterlockedExchange((long *)&var->value, (long)val);
#else
#error No compiler defined for Atomic_ReadWrite
#endif // __GNUC__
}
#define Atomic_ReadWrite Atomic_ReadWrite32


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Write32 --
 *
 *      Write
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Write32(Atomic_uint32 *var, // OUT
               uint32 val)         // IN
{
#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   /* These environments require natural alignment for atomicity. */
   ASSERT(((uintptr_t)var % 4) == 0);
#endif

#if defined __GNUC__
#if defined VM_ARM_64
   _VMATOM_X(W, 32, &var->value, val);
#elif defined VM_ARM_32
   /*
    * Best left this way due to the intricacies of exclusive load/store
    * operations on legacy (32-bit) ARM.
    *
    * A3.4.1 ARM DDI 0406C:
    *
    * When a processor writes using any instruction other than a
    * Store-Exclusive:
    *
    * - if the write is to a physical address that is not covered by its local
    *   monitor the write does not affect the state of the local monitor
    * - if the write is to a physical address that is covered by its local
    *   monitor it is IMPLEMENTATION DEFINED whether the write affects the
    *   state of the local monitor.
    *
    * A3.4.5 ARM DDI 0406C:
    *
    * If two STREX instructions are executed without an intervening LDREX the
    * second STREX returns a status value of 1. This means that:
    *
    * - ARM recommends that, in a given thread of execution, every STREX has a
    *   preceding LDREX associated with it
    * - it is not necessary for every LDREX to have a subsequent STREX.
    */

   Atomic_ReadWrite32(var, val);
#else
   /*
    * Use inline assembler to force using a single store instruction to
    * ensure that the compiler doesn't split a transfer operation into multiple
    * instructions.
    */

   __asm__ __volatile__(
      "mov %1, %0"
      : "=m" (var->value)
      : "r" (val)
   );
#endif
#elif defined _MSC_VER
   /*
    * Microsoft docs guarantee simple reads and writes to properly
    * aligned 32-bit variables use only a single instruction.
    * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
    */

   var->value = val;
#else
#error No compiler defined for Atomic_Write
#endif
}
#define Atomic_Write Atomic_Write32


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadIfEqualWrite32 --
 *
 *      Compare exchange: Read variable, if equal to oldVal, write newVal
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      The variable may be modified.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint32
Atomic_ReadIfEqualWrite32(Atomic_uint32 *var, // IN/OUT
                          uint32 oldVal,      // IN
                          uint32 newVal)      // IN
{
#if defined __GNUC__
#ifdef VM_ARM_V7
   register uint32 retVal;
   register uint32 res;

   dmb();

   /* LL/SC loop: store only when the loaded value equals oldVal. */
   __asm__ __volatile__(
   "1: ldrex %[retVal], [%[var]] \n\t"
      "mov %[res], #0 \n\t"
      "teq %[retVal], %[oldVal] \n\t"
      "strexeq %[res], %[newVal], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [retVal] "=&r" (retVal), [res] "=&r" (res)
      : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
      : "cc"
   );

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(RIFEQW, 32, TRUE, &var->value, oldVal, newVal);
#else /* VM_X86_ANY */
   uint32 val;

   /* Checked against the Intel manual and GCC --walken */
   __asm__ __volatile__(
      "lock; cmpxchgl %2, %1"
      : "=a" (val),
        "+m" (var->value)
      : "r" (newVal),
        "0" (oldVal)
      : "cc", "memory"
   );
   return val;
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
   return _InterlockedCompareExchange((long *)&var->value,
                                      (long)newVal,
                                      (long)oldVal);
#else
#error No compiler defined for Atomic_ReadIfEqualWrite
#endif
}
#define Atomic_ReadIfEqualWrite Atomic_ReadIfEqualWrite32


#if defined VM_64BIT || defined VM_ARM_V7
/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadIfEqualWrite64 --
 *
 *      Compare exchange: Read variable, if equal to oldVal, write newVal
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      The variable may be modified.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadIfEqualWrite64(Atomic_uint64 *var, // IN/OUT
                          uint64 oldVal,      // IN
                          uint64 newVal)      // IN
{
#if defined __GNUC__
#ifdef VM_ARM_V7
   register uint64 retVal;
   register uint32 res;

   dmb();

   /*
    * Under Apple LLVM version 5.0 (clang-500.2.76) (based on LLVM 3.3svn)
    * There will be a warning:
    * "value size does not match register size specified by the constraint
    * and modifier [-Wasm-operand-widths]"
    * on the lines:
    * : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
    *                                          ^
    * : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
    *                                                                 ^
    *
    * Furthermore, using a 32-bits register to store a
    * 64-bits value of an variable looks risky.
    */
#if defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wasm-operand-widths"
#endif
   /* 64-bit LL/SC: ldrexd/strexd operate on an even/odd register pair. */
   __asm__ __volatile__(
   "1: ldrexd %[retVal], %H[retVal], [%[var]] \n\t"
      "mov %[res], #0 \n\t"
      "teq %[retVal], %[oldVal] \n\t"
      "teqeq %H[retVal], %H[oldVal] \n\t"
      "strexdeq %[res], %[newVal], %H[newVal], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [retVal] "=&r" (retVal), [res] "=&r" (res)
      : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
      : "cc"
   );
#if defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5
#pragma clang diagnostic pop
#endif // defined __APPLE__ && __clang__ == 1 && __clang_major__ >= 5

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(RIFEQW, 64, TRUE, &var->value, oldVal, newVal);
#else /* VM_X86_64 */
   uint64 val;

   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; cmpxchgq %2, %1"
      : "=a" (val),
        "+m" (var->value)
      : "r" (newVal),
        "0" (oldVal)
      : "cc", "memory"
   );
   return val;
#endif //VM_ARM_V7
#elif defined _MSC_VER
   return _InterlockedCompareExchange64((__int64 *)&var->value,
                                        (__int64)newVal,
                                        (__int64)oldVal);
#else
#error No compiler defined for Atomic_ReadIfEqualWrite64
#endif
}
#endif


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_And32 --
 *
 *      Atomic read, bitwise AND with a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_And32(Atomic_uint32 *var, // IN/OUT
             uint32 val)         // IN
{
#if defined __GNUC__
#ifdef VM_ARM_V7
   register volatile uint32 res;
   register volatile uint32 tmp;

   dmb();

   /* LL/SC loop: read, AND, attempt exclusive store; retry on contention. */
   __asm__ __volatile__(
   "1: ldrex %[tmp], [%[var]] \n\t"
      "and %[tmp], %[tmp], %[val] \n\t"
      "strex %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 32, TRUE, &var->value, and, val);
#else /* VM_X86_ANY */
   /* Checked against the Intel manual and GCC --walken */
   __asm__ __volatile__(
      "lock; andl %1, %0"
      : "+m" (var->value)
      : "ri" (val)
      : "cc", "memory"
   );
#endif /* VM_X86_ANY */
#elif defined _MSC_VER
#if defined __x86_64__ || defined VM_ARM_32
   _InterlockedAnd((long *)&var->value, (long)val);
#else
   __asm mov eax, val
   __asm mov ebx, var
   __asm lock and [ebx]Atomic_uint32.value, eax
#endif
#else
#error No compiler defined for Atomic_And
#endif
}
#define Atomic_And Atomic_And32


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Or32 --
 *
 *      Atomic read, bitwise OR with a value, write.
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Or32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { #if defined __GNUC__ #ifdef VM_ARM_V7 register volatile uint32 res; register volatile uint32 tmp; dmb(); __asm__ __volatile__( "1: ldrex %[tmp], [%[var]] \n\t" "orr %[tmp], %[tmp], %[val] \n\t" "strex %[res], %[tmp], [%[var]] \n\t" "teq %[res], #0 \n\t" "bne 1b" : [res] "=&r" (res), [tmp] "=&r" (tmp) : [var] "r" (&var->value), [val] "r" (val) : "cc" ); dmb(); #elif defined VM_ARM_64 _VMATOM_X(OP, 32, TRUE, &var->value, orr, val); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; orl %1, %0" : "+m" (var->value) : "ri" (val) : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER #if defined __x86_64__ || defined VM_ARM_32 _InterlockedOr((long *)&var->value, (long)val); #else __asm mov eax, val __asm mov ebx, var __asm lock or [ebx]Atomic_uint32.value, eax #endif #else #error No compiler defined for Atomic_Or #endif } #define Atomic_Or Atomic_Or32 /* *----------------------------------------------------------------------------- * * Atomic_Xor32 -- * * Atomic read, bitwise XOR with a value, write. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Xor32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { #if defined __GNUC__ #ifdef VM_ARM_V7 register volatile uint32 res; register volatile uint32 tmp; dmb(); __asm__ __volatile__( "1: ldrex %[tmp], [%[var]] \n\t" "eor %[tmp], %[tmp], %[val] \n\t" "strex %[res], %[tmp], [%[var]] \n\t" "teq %[res], #0 \n\t" "bne 1b" : [res] "=&r" (res), [tmp] "=&r" (tmp) : [var] "r" (&var->value), [val] "r" (val) : "cc" ); dmb(); #elif defined VM_ARM_64 _VMATOM_X(OP, 32, TRUE, &var->value, eor, val); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; xorl %1, %0" : "+m" (var->value) : "ri" (val) : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER #if defined __x86_64__ || defined VM_ARM_32 _InterlockedXor((long *)&var->value, (long)val); #else __asm mov eax, val __asm mov ebx, var __asm lock xor [ebx]Atomic_uint32.value, eax #endif #else #error No compiler defined for Atomic_Xor #endif } #define Atomic_Xor Atomic_Xor32 #if defined VM_64BIT /* *----------------------------------------------------------------------------- * * Atomic_Xor64 -- * * Atomic read, bitwise XOR with a value, write. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Xor64(Atomic_uint64 *var, // IN/OUT uint64 val) // IN { #if defined __GNUC__ #if defined VM_ARM_64 _VMATOM_X(OP, 64, TRUE, &var->value, eor, val); #else /* VM_X86_64 */ /* Checked against the AMD manual and GCC --hpreg */ __asm__ __volatile__( "lock; xorq %1, %0" : "+m" (var->value) : "re" (val) : "cc", "memory" ); #endif #elif defined _MSC_VER _InterlockedXor64((__int64 *)&var->value, (__int64)val); #else #error No compiler defined for Atomic_Xor64 #endif } #endif /* *----------------------------------------------------------------------------- * * Atomic_Add32 -- * * Atomic read, add a value, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Add32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { #if defined __GNUC__ #ifdef VM_ARM_V7 register volatile uint32 res; register volatile uint32 tmp; dmb(); __asm__ __volatile__( "1: ldrex %[tmp], [%[var]] \n\t" "add %[tmp], %[tmp], %[val] \n\t" "strex %[res], %[tmp], [%[var]] \n\t" "teq %[res], #0 \n\t" "bne 1b" : [res] "=&r" (res), [tmp] "=&r" (tmp) : [var] "r" (&var->value), [val] "r" (val) : "cc" ); dmb(); #elif defined VM_ARM_64 _VMATOM_X(OP, 32, TRUE, &var->value, add, val); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; addl %1, %0" : "+m" (var->value) : "ri" (val) : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER _InterlockedExchangeAdd((long *)&var->value, (long)val); #else #error No compiler defined for Atomic_Add #endif } #define Atomic_Add Atomic_Add32 /* *----------------------------------------------------------------------------- * * Atomic_Sub32 -- * * Atomic read, subtract a value, write. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Sub32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { #if defined __GNUC__ #ifdef VM_ARM_V7 register volatile uint32 res; register volatile uint32 tmp; dmb(); __asm__ __volatile__( "1: ldrex %[tmp], [%[var]] \n\t" "sub %[tmp], %[tmp], %[val] \n\t" "strex %[res], %[tmp], [%[var]] \n\t" "teq %[res], #0 \n\t" "bne 1b" : [res] "=&r" (res), [tmp] "=&r" (tmp) : [var] "r" (&var->value), [val] "r" (val) : "cc" ); dmb(); #elif defined VM_ARM_64 _VMATOM_X(OP, 32, TRUE, &var->value, sub, val); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; subl %1, %0" : "+m" (var->value) : "ri" (val) : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER _InterlockedExchangeAdd((long *)&var->value, -(long)val); #else #error No compiler defined for Atomic_Sub #endif } #define Atomic_Sub Atomic_Sub32 /* *----------------------------------------------------------------------------- * * Atomic_Inc32 -- * * Atomic read, increment, write. * * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Inc32(Atomic_uint32 *var) // IN/OUT { #ifdef __GNUC__ #if defined VM_ARM_ANY Atomic_Add32(var, 1); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; incl %0" : "+m" (var->value) : : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER _InterlockedIncrement((long *)&var->value); #else #error No compiler defined for Atomic_Inc #endif } #define Atomic_Inc Atomic_Inc32 /* *----------------------------------------------------------------------------- * * Atomic_Dec32 -- * * Atomic read, decrement, write. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE void Atomic_Dec32(Atomic_uint32 *var) // IN/OUT { #ifdef __GNUC__ #if defined VM_ARM_ANY Atomic_Sub32(var, 1); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; decl %0" : "+m" (var->value) : : "cc", "memory" ); #endif /* VM_X86_ANY */ #elif defined _MSC_VER _InterlockedDecrement((long *)&var->value); #else #error No compiler defined for Atomic_Dec #endif } #define Atomic_Dec Atomic_Dec32 /* * Note that the technique below can be used to implement ReadX(), where X is * an arbitrary mathematical function. */ /* *----------------------------------------------------------------------------- * * Atomic_ReadOr32 -- * * Atomic read (returned), bitwise OR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadOr32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { uint32 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 32, TRUE, &var->value, orr, val); #else do { res = Atomic_Read32(var); } while (res != Atomic_ReadIfEqualWrite32(var, res, res | val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_ReadAnd32 -- * * Atomic read (returned), bitwise And with a value, write. * * Results: * The value of the variable before the operation. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadAnd32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { uint32 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 32, TRUE, &var->value, and, val); #else do { res = Atomic_Read32(var); } while (res != Atomic_ReadIfEqualWrite32(var, res, res & val)); #endif return res; } #if defined VM_64BIT /* *----------------------------------------------------------------------------- * * Atomic_ReadOr64 -- * * Atomic read (returned), bitwise OR with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadOr64(Atomic_uint64 *var, // IN/OUT uint64 val) // IN { uint64 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 64, TRUE, &var->value, orr, val); #else do { res = var->value; } while (res != Atomic_ReadIfEqualWrite64(var, res, res | val)); #endif return res; } /* *----------------------------------------------------------------------------- * * Atomic_ReadAnd64 -- * * Atomic read (returned), bitwise AND with a value, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint64 Atomic_ReadAnd64(Atomic_uint64 *var, // IN/OUT uint64 val) // IN { uint64 res; #if defined VM_ARM_64 res = _VMATOM_X(ROP, 64, TRUE, &var->value, and, val); #else do { res = var->value; } while (res != Atomic_ReadIfEqualWrite64(var, res, res & val)); #endif return res; } #endif /* defined VM_64BIT */ /* *----------------------------------------------------------------------------- * * Atomic_ReadAdd32 -- * * Atomic read (returned), add a value, write. 
* * If you have to implement ReadAdd32() on an architecture other than * x86 or x86-64, you might want to consider doing something similar to * Atomic_ReadOr32(). * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadAdd32(Atomic_uint32 *var, // IN/OUT uint32 val) // IN { #if defined __GNUC__ #ifdef VM_ARM_V7 register volatile uint32 res; register volatile uint32 retVal; register volatile uint32 tmp; dmb(); __asm__ __volatile__( "1: ldrex %[retVal], [%[var]] \n\t" "add %[tmp], %[val], %[retVal] \n\t" "strex %[res], %[tmp], [%[var]] \n\t" "teq %[res], #0 \n\t" "bne 1b" : [tmp] "=&r" (tmp), [res] "=&r" (res), [retVal] "=&r" (retVal) : [var] "r" (&var->value), [val] "r" (val) : "cc" ); dmb(); return retVal; #elif defined VM_ARM_64 return _VMATOM_X(ROP, 32, TRUE, &var->value, add, val); #else /* VM_X86_ANY */ /* Checked against the Intel manual and GCC --walken */ __asm__ __volatile__( "lock; xaddl %0, %1" : "=r" (val), "+m" (var->value) : "0" (val) : "cc", "memory" ); return val; #endif /* VM_X86_ANY */ #elif defined _MSC_VER return _InterlockedExchangeAdd((long *)&var->value, (long)val); #else #error No compiler defined for Atomic_ReadAdd32 #endif } /* *----------------------------------------------------------------------------- * * Atomic_ReadInc32 -- * * Atomic read (returned), increment, write. * * Results: * The value of the variable before the operation. * * Side effects: * None * *----------------------------------------------------------------------------- */ static INLINE uint32 Atomic_ReadInc32(Atomic_uint32 *var) // IN/OUT { return Atomic_ReadAdd32(var, 1); } /* *----------------------------------------------------------------------------- * * Atomic_ReadDec32 -- * * Atomic read (returned), decrement, write. * * Results: * The value of the variable before the operation. 
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint32
Atomic_ReadDec32(Atomic_uint32 *var) // IN/OUT
{
   /* (uint32)-1: adding it decrements modulo 2^32. */
   return Atomic_ReadAdd32(var, (uint32)-1);
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_CMPXCHG64 --
 *
 *      Compare exchange: Read variable, if equal to oldVal, write newVal
 *
 * Results:
 *      TRUE if equal, FALSE if not equal
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_CMPXCHG64(Atomic_uint64 *var,   // IN/OUT
                 uint64 oldVal,        // IN
                 uint64 newVal)        // IN
{
#if defined __GNUC__
#if defined VM_ARM_ANY
   return Atomic_ReadIfEqualWrite64(var, oldVal, newVal) == oldVal;
#else /* VM_X86_ANY */

   Bool equal;
   /* Checked against the Intel manual and GCC --walken */
#if defined __x86_64__
   uint64 dummy;

   __asm__ __volatile__(
      "lock; cmpxchgq %3, %0" "\n\t"
      "sete %1"
      : "+m" (*var),
        "=qm" (equal),
        "=a" (dummy)
      : "r" (newVal),
        "2" (oldVal)
      : "cc", "memory"
   );
#else /* 32-bit version for non-ARM */
   /* Split each 64-bit value into halves for cmpxchg8b's EDX:EAX / ECX:EBX. */
   typedef struct {
      uint32 lowValue;
      uint32 highValue;
   } S_uint64;

   int dummy1, dummy2;
#   if defined __PIC__
   /*
    * Rules for __asm__ statements in __PIC__ code
    * --------------------------------------------
    *
    * The compiler uses %ebx for __PIC__ code, so an __asm__ statement cannot
    * clobber %ebx. The __asm__ statement can temporarily modify %ebx, but _for
    * each parameter that is used while %ebx is temporarily modified_:
    *
    * 1) The constraint cannot be "m", because the memory location the compiler
    *    chooses could then be relative to %ebx.
    *
    * 2) The constraint cannot be a register class which contains %ebx (such as
    *    "r" or "q"), because the register the compiler chooses could then be
    *    %ebx. (This happens when compiling the Fusion UI with gcc 4.2.1, Apple
    *    build 5577.)
    *
    * 3) Using register classes even for other values is problematic, as gcc
    *    can decide e.g. %ecx == %edi == 0 (as compile-time constants) and
    *    ends up using one register for two things. Which breaks xchg's ability
    *    to temporarily put the PIC pointer somewhere else. PR772455
    *
    * For that reason alone, the __asm__ statement should keep the regions
    * where it temporarily modifies %ebx as small as possible, and should
    * prefer specific register assignments.
    */
   __asm__ __volatile__(
      "xchgl %%ebx, %6"      "\n\t"
      "lock; cmpxchg8b (%3)" "\n\t"
      "xchgl %%ebx, %6"      "\n\t"
      "sete %0"
      : "=qm" (equal),
        "=a" (dummy1),
        "=d" (dummy2)
      :
        /*
         * See the "Rules for __asm__ statements in __PIC__ code" above: %3
         * must use a register class which does not contain %ebx.
         * "a"/"c"/"d" are already used, so we are left with either "S" or "D".
         *
         * Note that this assembly uses ALL GP registers (with %esp reserved
         * for stack, %ebp reserved for frame, %ebx reserved for PIC).
         */
        "S" (var),
        "1" (((S_uint64 *)&oldVal)->lowValue),
        "2" (((S_uint64 *)&oldVal)->highValue),
        "D" (((S_uint64 *)&newVal)->lowValue),
        "c" (((S_uint64 *)&newVal)->highValue)
      : "cc", "memory"
   );
#   else
   __asm__ __volatile__(
      "lock; cmpxchg8b %0" "\n\t"
      "sete %1"
      : "+m" (*var),
        "=qm" (equal),
        "=a" (dummy1),
        "=d" (dummy2)
      : "2" (((S_uint64 *)&oldVal)->lowValue),
        "3" (((S_uint64 *)&oldVal)->highValue),
        "b" (((S_uint64 *)&newVal)->lowValue),
        "c" (((S_uint64 *)&newVal)->highValue)
      : "cc", "memory"
   );
#   endif
#endif
   return equal;
#endif //VM_ARM_V7
#elif defined _MSC_VER
   return (__int64)oldVal == _InterlockedCompareExchange64((__int64 *)&var->value,
                                                           (__int64)newVal,
                                                           (__int64)oldVal);
#else
#error No compiler defined for Atomic_CMPXCHG64
#endif // !GNUC
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_CMPXCHG32 --
 *
 *      Compare exchange: Read variable, if equal to oldVal, write newVal
 *
 * Results:
 *      TRUE if equal, FALSE if not equal
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_CMPXCHG32(Atomic_uint32 *var,   // IN/OUT
                 uint32 oldVal,        // IN
                 uint32 newVal)        // IN
{
#if defined __GNUC__
#if defined VM_ARM_ANY
   return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
#else /* VM_X86_ANY */
   Bool equal;
   uint32 dummy;

   __asm__ __volatile__(
      "lock; cmpxchgl %3, %0" "\n\t"
      "sete %1"
      : "+m" (*var),
        "=qm" (equal),
        "=a" (dummy)
      : "r" (newVal),
        "2" (oldVal)
      : "cc", "memory"
   );
   return equal;
#endif /* VM_X86_ANY */
#else // defined __GNUC__
   /* Non-GCC fallback in terms of the compare-and-swap primitive. */
   return Atomic_ReadIfEqualWrite32(var, oldVal, newVal) == oldVal;
#endif // !defined __GNUC__
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Read64 --
 *
 *      Read and return.
 *
 * Results:
 *      The value of the atomic variable.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint64
Atomic_Read64(Atomic_uint64 const *var) // IN
{
#if defined __GNUC__
   uint64 value;
#endif

#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   /* These environments require natural alignment for atomicity. */
   ASSERT((uintptr_t)var % 8 == 0);
#endif

#if defined __GNUC__ && defined __x86_64__
   /*
    * Use asm to ensure we emit a single load.
    */
   __asm__ __volatile__(
      "movq %1, %0"
      : "=r" (value)
      : "m" (var->value)
   );
#elif defined __GNUC__ && defined __i386__
   /*
    * Since cmpxchg8b will replace the contents of EDX:EAX with the
    * value in memory if there is no match, we need only execute the
    * instruction once in order to atomically read 64 bits from
    * memory. The only constraint is that ECX:EBX must have the same
    * value as EDX:EAX so that if the comparison succeeds. We
    * intentionally don't tell gcc that we are using ebx and ecx as we
    * don't modify them and do not care what value they store.
    */
   __asm__ __volatile__(
      "mov %%ebx, %%eax"   "\n\t"
      "mov %%ecx, %%edx"   "\n\t"
      "lock; cmpxchg8b %1"
      : "=&A" (value)
      : "m" (*var)
      : "cc"
   );
#elif defined _MSC_VER && defined __x86_64__
   /*
    * Microsoft docs guarantee "Simple reads and writes to properly
    * aligned 64-bit variables are atomic on 64-bit Windows."
    * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
    *
    * XXX Verify that value is properly aligned. Bug 61315.
    */
   return var->value;
#elif defined _MSC_VER && defined VM_ARM_32
   /* Atomic add of zero is an atomic 64-bit read. */
   return _InterlockedAdd64((__int64 *)&var->value, 0);
#elif defined _MSC_VER && defined __i386__
#   pragma warning(push)
#   pragma warning(disable : 4035) // disable no-return warning
   {
      __asm mov ecx, var
      __asm mov edx, ecx
      __asm mov eax, ebx
      __asm lock cmpxchg8b [ecx]
      // edx:eax is the return value; this is documented to work. --mann
   }
#   pragma warning(pop)
#elif defined __GNUC__ && defined VM_ARM_V7
   /* ldrexd performs a single-copy-atomic 64-bit load on ARMv7. */
   __asm__ __volatile__(
      "ldrexd %[value], %H[value], [%[var]] \n\t"
      : [value] "=&r" (value)
      : [var] "r" (&var->value)
   );
#elif defined VM_ARM_64
   value = _VMATOM_X(R, 64, &var->value);
#endif

#if defined __GNUC__
   return value;
#endif
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadUnaligned64 --
 *
 *      Atomically read a 64 bit integer, possibly misaligned.
 *      This function can be *very* expensive, costing over 50 kcycles
 *      on Nehalem.
 *
 *      Note that "var" needs to be writable, even though it will not
 *      be modified.
 *
 * Results:
 *      The value of the atomic variable.
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

#if defined VM_64BIT
static INLINE uint64
Atomic_ReadUnaligned64(Atomic_uint64 const *var)  // IN:
{
   /*
    * CAS(var, 0, 0): if *var == 0 this writes 0 (no change); otherwise it
    * fails and returns the current value.  Either way we get an atomic read
    * that tolerates misalignment, at the price of a locked RMW cycle.
    */
   return Atomic_ReadIfEqualWrite64((Atomic_uint64*)var, 0, 0);
}
#endif


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadAdd64 --
 *
 *      Atomically adds a 64-bit integer to another
 *
 * Results:
 *      Returns the old value just prior to the addition
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadAdd64(Atomic_uint64 *var, // IN/OUT
                 uint64 val)         // IN
{
#if defined VM_ARM_64
   return _VMATOM_X(ROP, 64, TRUE, &var->value, add, val);
#elif defined __x86_64__

#if defined __GNUC__
   __asm__ __volatile__(
      "lock; xaddq %0, %1"
      : "=r" (val), "+m" (var->value)
      : "0" (val)
      : "cc", "memory"
   );
   return val;
#elif defined _MSC_VER
   return _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_ReadAdd64
#endif

#else
   /* 32-bit fallback: CAS loop built on cmpxchg8b. */
   uint64 oldVal;
   uint64 newVal;

   do {
      oldVal = var->value;
      newVal = oldVal + val;
   } while (!Atomic_CMPXCHG64(var, oldVal, newVal));

   return oldVal;
#endif
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadSub64 --
 *
 *      Atomically subtracts a 64-bit integer from another
 *
 * Results:
 *      Returns the old value just prior to the subtraction
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadSub64(Atomic_uint64 *var, // IN/OUT
                 uint64 val)         // IN
{
#if defined VM_ARM_64
   return _VMATOM_X(ROP, 64, TRUE, &var->value, sub, val);
#else
   /* Two's-complement: subtracting val == adding its negation. */
   return Atomic_ReadAdd64(var, -(int64)val);
#endif
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadInc64 --
 *
 *      Atomically increments a 64-bit integer
 *
 * Results:
 *      Returns the old value just prior to incrementing
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadInc64(Atomic_uint64 *var) // IN/OUT
{
   return Atomic_ReadAdd64(var, 1);
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadDec64 --
 *
 *      Atomically decrements a 64-bit integer
 *
 * Results:
 *      Returns the old value just prior to decrementing
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadDec64(Atomic_uint64 *var) // IN/OUT
{
   /* CONST64(-1) sign-extends to all-ones; adding it decrements by one. */
   return Atomic_ReadAdd64(var, (uint64)CONST64(-1));
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Add64 --
 *
 *      Atomic read, add a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Add64(Atomic_uint64 *var, // IN/OUT
             uint64 val)         // IN
{
#if !defined VM_64BIT
   Atomic_ReadAdd64(var, val); /* Return value is unused. */
#elif defined __GNUC__
#if defined VM_ARM_64
   _VMATOM_X(OP, 64, TRUE, &var->value, add, val);
#else /* defined VM_X86_64 */
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; addq %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#endif
#elif defined _MSC_VER
   _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_Add64
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Sub64 --
 *
 *      Atomic read, subtract a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Sub64(Atomic_uint64 *var, // IN/OUT
             uint64 val)         // IN
{
#if !defined VM_64BIT
   Atomic_ReadSub64(var, val); /* Return value is unused. */
#elif defined __GNUC__
#if defined VM_ARM_64
   _VMATOM_X(OP, 64, TRUE, &var->value, sub, val);
#else /* VM_X86_64 */
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; subq %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#endif
#elif defined _MSC_VER
   _InterlockedExchangeAdd64((__int64 *)&var->value, (__int64)-val);
#else
#error No compiler defined for Atomic_Sub64
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Inc64 --
 *
 *      Atomic read, increment, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Inc64(Atomic_uint64 *var) // IN/OUT
{
#if defined VM_ARM_64
   Atomic_Add64(var, 1);
#elif !defined __x86_64__
   Atomic_ReadInc64(var);  /* Return value is unused. */
#elif defined __GNUC__
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; incq %0"
      : "+m" (var->value)
      :
      : "cc", "memory"
   );
#elif defined _MSC_VER
   _InterlockedIncrement64((__int64 *)&var->value);
#else
#error No compiler defined for Atomic_Inc64
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Dec64 --
 *
 *      Atomic read, decrement, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Dec64(Atomic_uint64 *var) // IN/OUT
{
#if defined VM_ARM_64
   Atomic_Sub64(var, 1);
#elif !defined __x86_64__
   Atomic_ReadDec64(var);  /* Return value is unused. */
#elif defined __GNUC__
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; decq %0"
      : "+m" (var->value)
      :
      : "cc", "memory"
   );
#elif defined _MSC_VER
   _InterlockedDecrement64((__int64 *)&var->value);
#else
#error No compiler defined for Atomic_Dec64
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadWrite64 --
 *
 *      Read followed by write
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint64
Atomic_ReadWrite64(Atomic_uint64 *var, // IN/OUT
                   uint64 val)         // IN
{
#if defined __x86_64__
#if defined __GNUC__
   /* Checked against the AMD manual and GCC --hpreg */
   /* xchg with a memory operand is implicitly locked. */
   __asm__ __volatile__(
      "xchgq %0, %1"
      : "=r" (val), "+m" (var->value)
      : "0" (val)
      : "memory"
   );
   return val;
#elif defined _MSC_VER
   return _InterlockedExchange64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_ReadWrite64
#endif
#elif defined VM_ARM_64
   return _VMATOM_X(RW, 64, TRUE, &var->value, val);
#else
   uint64 oldVal;

   do {
      oldVal = var->value;
   } while (!Atomic_CMPXCHG64(var, oldVal, val));

   return oldVal;
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Write64 --
 *
 *      Write
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Write64(Atomic_uint64 *var, // OUT
               uint64 val)         // IN
{
#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   ASSERT((uintptr_t)var % 8 == 0);
#endif

#if defined __x86_64__
#if defined __GNUC__
   /*
    * There is no move instruction for 64-bit immediate to memory, so unless
    * the immediate value fits in 32-bit (i.e. can be sign-extended), GCC
    * breaks the assignment into two movl instructions.  The code below forces
    * GCC to load the immediate value into a register first.
    */
   __asm__ __volatile__(
      "movq %1, %0"
      : "=m" (var->value)
      : "r" (val)
   );
#elif defined _MSC_VER
   /*
    * Microsoft docs guarantee "Simple reads and writes to properly aligned
    * 64-bit variables are atomic on 64-bit Windows."
    * http://msdn.microsoft.com/en-us/library/ms684122%28VS.85%29.aspx
    *
    * XXX Verify that value is properly aligned. Bug 61315.
    */
   var->value = val;
#else
#error No compiler defined for Atomic_Write64
#endif
#elif defined VM_ARM_64
   _VMATOM_X(W, 64, &var->value, val);
#else
   /* 32-bit fallback: only cmpxchg8b-based exchange can store 64 bits atomically. */
   (void)Atomic_ReadWrite64(var, val);
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Or64 --
 *
 *      Atomic read, bitwise OR with a 64-bit value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Or64(Atomic_uint64 *var, // IN/OUT
            uint64 val)         // IN
{
#if defined __x86_64__
#if defined __GNUC__
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; orq %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined _MSC_VER
   _InterlockedOr64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_Or64
#endif
#elif defined VM_ARM_64
   _VMATOM_X(OP, 64, TRUE, &var->value, orr, val);
#else // __x86_64__
   uint64 oldVal;
   uint64 newVal;
   do {
      oldVal = var->value;
      newVal = oldVal | val;
   } while (!Atomic_CMPXCHG64(var, oldVal, newVal));
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_And64 --
 *
 *      Atomic read, bitwise AND with a 64-bit value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_And64(Atomic_uint64 *var, // IN/OUT
             uint64 val)         // IN
{
#if defined __x86_64__
#if defined __GNUC__
   /* Checked against the AMD manual and GCC --hpreg */
   __asm__ __volatile__(
      "lock; andq %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined _MSC_VER
   _InterlockedAnd64((__int64 *)&var->value, (__int64)val);
#else
#error No compiler defined for Atomic_And64
#endif
#elif defined VM_ARM_64
   _VMATOM_X(OP, 64, TRUE, &var->value, and, val);
#else // __x86_64__
   uint64 oldVal;
   uint64 newVal;
   do {
      oldVal = var->value;
      newVal = oldVal & val;
   } while (!Atomic_CMPXCHG64(var, oldVal, newVal));
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_SetBit64 --
 *
 *      Atomically set the bit 'bit' in var.  Bit must be between 0 and 63.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_SetBit64(Atomic_uint64 *var, // IN/OUT
                unsigned bit)       // IN
{
#if defined __x86_64__ && defined __GNUC__
   ASSERT(bit <= 63);
   __asm__ __volatile__(
      "lock; btsq %1, %0"
      : "+m" (var->value)
      : "ri" ((uint64)bit)
      : "cc", "memory"
   );
#else
   uint64 oldVal;
   uint64 newVal;
   ASSERT(bit <= 63);
   do {
      oldVal = var->value;
      newVal = oldVal | (CONST64U(1) << bit);
   } while (!Atomic_CMPXCHG64(var, oldVal, newVal));
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ClearBit64 --
 *
 *      Atomically clear the bit 'bit' in var.  Bit must be between 0 and 63.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_ClearBit64(Atomic_uint64 *var, // IN/OUT
                  unsigned bit)       // IN
{
#if defined __x86_64__ && defined __GNUC__
   ASSERT(bit <= 63);
   __asm__ __volatile__(
      "lock; btrq %1, %0"
      : "+m" (var->value)
      : "ri" ((uint64)bit)
      : "cc", "memory"
   );
#else
   uint64 oldVal;
   uint64 newVal;
   ASSERT(bit <= 63);
   do {
      oldVal = var->value;
      newVal = oldVal & ~(CONST64U(1) << bit);
   } while (!Atomic_CMPXCHG64(var, oldVal, newVal));
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_TestBit64 --
 *
 *      Read the bit 'bit' in var.  Bit must be between 0 and 63.
 *
 *      Note: this is a plain (unlocked) read of the bit.
 *
 * Results:
 *      TRUE if the tested bit was set; else FALSE.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_TestBit64(Atomic_uint64 *var, // IN
                 unsigned bit)       // IN
{
   Bool out;
   ASSERT(bit <= 63);
#if defined __x86_64__ && defined __GNUC__
   __asm__ __volatile__(
      "btq %2, %1; setc %0"
      : "=rm"(out)
      : "m" (var->value), "rJ" ((uint64)bit)
      : "cc"
   );
#else
   out = (var->value & (CONST64U(1) << bit)) != 0;
#endif
   return out;
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_TestSetBit64 --
 *
 *      Atomically test and set the bit 'bit' in var.
 *      Bit must be between 0 and 63.
 *
 * Results:
 *      TRUE if the tested bit was set; else FALSE.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_TestSetBit64(Atomic_uint64 *var, // IN/OUT
                    unsigned bit)       // IN
{
#if defined __x86_64__ && defined __GNUC__
   Bool out;
   ASSERT(bit <= 63);
   __asm__ __volatile__(
      "lock; btsq %2, %1; setc %0"
      : "=rm" (out), "+m" (var->value)
      : "rJ" ((uint64)bit)
      : "cc", "memory"
   );
   return out;
#else
   uint64 oldVal;
   uint64 mask;
   ASSERT(bit <= 63);
   mask = CONST64U(1) << bit;
   do {
      oldVal = var->value;
   } while (!Atomic_CMPXCHG64(var, oldVal, oldVal | mask));
   return (oldVal & mask) != 0;
#endif
}


#if defined __GNUC__
/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Read16 --
 *
 *      Read and return.
 *
 * Results:
 *      The value of the atomic variable.
 *
 * Side effects:
 *      None.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint16
Atomic_Read16(Atomic_uint16 const *var) // IN
{
   uint16 value;

#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   ASSERT((uintptr_t)var % 2 == 0);
#endif

#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   /* Single movw guarantees a single (atomic) 16-bit load. */
   __asm__ __volatile__(
      "movw %1, %0"
      : "=r" (value)
      : "m" (var->value)
   );
#elif defined VM_ARM_V7
   NOT_TESTED();

   __asm__ __volatile__(
      "ldrh %0, [%1]"
      : "=r" (value)
      : "r" (&var->value)
   );
#elif defined VM_ARM_64
   value = _VMATOM_X(R, 16, &var->value);
#else
#error No 16-bits atomics.
#endif
#endif

   return value;
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadWrite16 --
 *
 *      Read followed by write
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadWrite16(Atomic_uint16 *var, // IN/OUT:
                   uint16 val)         // IN:
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   /* xchg with a memory operand is implicitly locked. */
   __asm__ __volatile__(
      "xchgw %0, %1"
      : "=r" (val), "+m" (var->value)
      : "0" (val)
      : "memory"
   );
   return val;
#elif defined VM_ARM_V7
   register volatile uint16 retVal;
   register volatile uint16 res;

   NOT_TESTED();

   dmb();

   /* ldrexh/strexh loop: retry until the exclusive store succeeds (res == 0). */
   __asm__ __volatile__(
   "1: ldrexh %[retVal], [%[var]] \n\t"
      "strexh %[res], %[val], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [retVal] "=&r" (retVal), [res] "=&r" (res)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(RW, 16, TRUE, &var->value, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Write16 --
 *
 *      Write
 *
 * Results:
 *      None.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Write16(Atomic_uint16 *var, // OUT:
               uint16 val)         // IN:
{
#if defined VMM || defined VM_ARM_64 || defined VMKERNEL || defined VMKERNEL_MODULE
   ASSERT((uintptr_t)var % 2 == 0);
#endif

#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "movw %1, %0"
      : "=m" (var->value)
      : "r" (val)
   );
#elif defined VM_ARM_64
   _VMATOM_X(W, 16, &var->value, val);
#elif defined VM_ARM_32
   /*
    * Best left this way due to the intricacies of exclusive load/store
    * operations on legacy (32-bit) ARM.
    */
   Atomic_ReadWrite16(var, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadIfEqualWrite16 --
 *
 *      Compare exchange: Read variable, if equal to oldVal, write newVal
 *
 * Results:
 *      The value of the atomic variable before the write.
 *
 * Side effects:
 *      The variable may be modified.
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadIfEqualWrite16(Atomic_uint16 *var, // IN/OUT
                          uint16 oldVal,      // IN
                          uint16 newVal)      // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   uint16 val;

   __asm__ __volatile__(
      "lock; cmpxchgw %2, %1"
      : "=a" (val), "+m" (var->value)
      : "r" (newVal), "0" (oldVal)
      : "cc", "memory"
   );

   return val;
#elif defined VM_ARM_V7
   register uint16 retVal;
   register uint16 res;

   NOT_TESTED();

   dmb();

   /* Store newVal only if the loaded value equals oldVal (strexheq). */
   __asm__ __volatile__(
   "1: ldrexh %[retVal], [%[var]] \n\t"
      "mov %[res], #0 \n\t"
      "teq %[retVal], %[oldVal] \n\t"
      "strexheq %[res], %[newVal], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [retVal] "=&r" (retVal), [res] "=&r" (res)
      : [var] "r" (&var->value), [oldVal] "r" (oldVal), [newVal] "r" (newVal)
      : "cc"
   );

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(RIFEQW, 16, TRUE, &var->value, oldVal, newVal);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_And16 --
 *
 *      Atomic read, bitwise AND with a 16-bit value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_And16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; andw %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[tmp], [%[var]] \n\t"
      "and %[tmp], %[tmp], %[val] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 16, TRUE, &var->value, and, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Or16 --
 *
 *      Atomic read, bitwise OR with a 16-bit value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Or16(Atomic_uint16 *var, // IN/OUT
            uint16 val)         // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; orw %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[tmp], [%[var]] \n\t"
      "orr %[tmp], %[tmp], %[val] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 16, TRUE, &var->value, orr, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Xor16 --
 *
 *      Atomic read, bitwise XOR with a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Xor16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; xorw %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[tmp], [%[var]] \n\t"
      "eor %[tmp], %[tmp], %[val] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 16, TRUE, &var->value, eor, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Add16 --
 *
 *      Atomic read, add a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Add16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; addw %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[tmp], [%[var]] \n\t"
      "add %[tmp], %[tmp], %[val] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 16, TRUE, &var->value, add, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Sub16 --
 *
 *      Atomic read, subtract a value, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Sub16(Atomic_uint16 *var, // IN/OUT
             uint16 val)         // IN
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; subw %1, %0"
      : "+m" (var->value)
      : "re" (val)
      : "cc", "memory"
   );
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[tmp], [%[var]] \n\t"
      "sub %[tmp], %[tmp], %[val] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [res] "=&r" (res), [tmp] "=&r" (tmp)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();
#elif defined VM_ARM_64
   _VMATOM_X(OP, 16, TRUE, &var->value, sub, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Inc16 --
 *
 *      Atomic read, increment, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Inc16(Atomic_uint16 *var) // IN/OUT
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; incw %0"
      : "+m" (var->value)
      :
      : "cc", "memory"
   );
#elif defined VM_ARM_ANY
   Atomic_Add16(var, 1);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_Dec16 --
 *
 *      Atomic read, decrement, write.
 *
 * Results:
 *      None
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE void
Atomic_Dec16(Atomic_uint16 *var) // IN/OUT
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; decw %0"
      : "+m" (var->value)
      :
      : "cc", "memory"
   );
#elif defined VM_ARM_ANY
   Atomic_Sub16(var, 1);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_ReadOr16 --
 *
 *      Atomic read (returned), bitwise OR with a value, write.
 *
 * Results:
 *      The value of the variable before the operation.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadOr16(Atomic_uint16 *var, // IN/OUT
                uint16 val)         // IN
{
   uint16 res;

#if defined VM_ARM_64
   res = _VMATOM_X(ROP, 16, TRUE, &var->value, orr, val);
#else
   /* CAS loop: retry until no other writer intervened. */
   do {
      res = var->value;
   } while (res != Atomic_ReadIfEqualWrite16(var, res, res | val));
#endif

   return res;
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadAdd16 --
 *
 *      Atomically adds a 16-bit integer to another
 *
 * Results:
 *      Returns the old value just prior to the addition
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadAdd16(Atomic_uint16 *var, // IN/OUT
                 uint16 val)         // IN:
{
#if defined __GNUC__
#if defined __x86_64__ || defined __i386__
   __asm__ __volatile__(
      "lock; xaddw %0, %1"
      : "=r" (val), "+m" (var->value)
      : "0" (val)
      : "cc", "memory"
   );
   return val;
#elif defined VM_ARM_V7
   register volatile uint16 res;
   register volatile uint16 retVal;
   register volatile uint16 tmp;

   NOT_TESTED();

   dmb();

   __asm__ __volatile__(
   "1: ldrexh %[retVal], [%[var]] \n\t"
      "add %[tmp], %[val], %[retVal] \n\t"
      "strexh %[res], %[tmp], [%[var]] \n\t"
      "teq %[res], #0 \n\t"
      "bne 1b"
      : [tmp] "=&r" (tmp), [res] "=&r" (res), [retVal] "=&r" (retVal)
      : [var] "r" (&var->value), [val] "r" (val)
      : "cc"
   );

   dmb();

   return retVal;
#elif defined VM_ARM_64
   return _VMATOM_X(ROP, 16, TRUE, &var->value, add, val);
#else
#error No 16-bits atomics.
#endif
#endif
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadInc16 --
 *
 *      Atomically increments a 16-bit integer
 *
 * Results:
 *      Returns the old value just prior to incrementing
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadInc16(Atomic_uint16 *var) // IN/OUT
{
   return Atomic_ReadAdd16(var, 1);
}


/*
 *----------------------------------------------------------------------
 *
 * Atomic_ReadDec16 --
 *
 *      Atomically decrements a 16-bit integer
 *
 * Results:
 *      Returns the old value just prior to decrementing
 *
 * Side effects:
 *      None
 *
 *----------------------------------------------------------------------
 */

static INLINE uint16
Atomic_ReadDec16(Atomic_uint16 *var) // IN/OUT
{
   /* -1 converts to 0xFFFF; adding it decrements modulo 2^16. */
   return Atomic_ReadAdd16(var, -1);
}
#endif

/*
 * Template code for the Atomic_<name> type and its operators.
 *
 * The cast argument is an intermediate type cast to make some
 * compilers stop complaining about casting uint32 <-> void *,
 * even though we only do it in the 32-bit case so they are always
 * the same size.  So for val of type uint32, instead of
 * (void *)val, we have (void *)(uintptr_t)val.
 * The specific problem case is the Windows ddk compiler
 * (as used by the SVGA driver).  -- edward
 *
 * NOTE: See the comment in vm_assert.h for why we need UNUSED_TYPE in
 * AtomicAssertOnCompile(), and why we need to be very careful doing so.
 */

#define MAKE_ATOMIC_TYPE(name, size, in, out, cast)                           \
   typedef Atomic_uint ## size Atomic_ ## name;                               \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   AtomicAssertOnCompile ## name(void)                                        \
   {                                                                          \
      enum { AssertOnCompileMisused =    8 * sizeof (in) == size              \
                                      && 8 * sizeof (out) == size             \
                                      && 8 * sizeof (cast) == size            \
                                      ? 1 : -1 };                             \
      UNUSED_TYPE(typedef char AssertOnCompileFailed[AssertOnCompileMisused]);\
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_Read ## name(Atomic_ ## name const *var)                            \
   {                                                                          \
      return (out)(cast)Atomic_Read ## size(var);                             \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Write ## name(Atomic_ ## name *var,                                 \
                        in val)                                               \
   {                                                                          \
      Atomic_Write ## size(var, (uint ## size)(cast)val);                     \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadWrite ## name(Atomic_ ## name *var,                             \
                            in val)                                           \
   {                                                                          \
      return (out)(cast)Atomic_ReadWrite ## size(var,                         \
                (uint ## size)(cast)val);                                     \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadIfEqualWrite ## name(Atomic_ ## name *var,                      \
                                   in oldVal,                                 \
                                   in newVal)                                 \
   {                                                                          \
      return (out)(cast)Atomic_ReadIfEqualWrite ## size(var,                  \
                (uint ## size)(cast)oldVal, (uint ## size)(cast)newVal);      \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_And ## name(Atomic_ ## name *var,                                   \
                      in val)                                                 \
   {                                                                          \
      Atomic_And ## size(var, (uint ## size)(cast)val);                       \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Or ## name(Atomic_ ## name *var,                                    \
                     in val)                                                  \
   {                                                                          \
      Atomic_Or ## size(var, (uint ## size)(cast)val);                        \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Xor ## name(Atomic_ ## name *var,                                   \
                      in val)                                                 \
   {                                                                          \
      Atomic_Xor ## size(var, (uint ## size)(cast)val);                       \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Add ## name(Atomic_ ## name *var,                                   \
                      in val)                                                 \
   {                                                                          \
      Atomic_Add ## size(var, (uint ## size)(cast)val);                       \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Sub ## name(Atomic_ ## name *var,                                   \
                      in val)                                                 \
   {                                                                          \
      Atomic_Sub ## size(var, (uint ## size)(cast)val);                       \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Inc ## name(Atomic_ ## name *var)                                   \
   {                                                                          \
      Atomic_Inc ## size(var);                                                \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE void                                                         \
   Atomic_Dec ## name(Atomic_ ## name *var)                                   \
   {                                                                          \
      Atomic_Dec ## size(var);                                                \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadOr ## name(Atomic_ ## name *var,                                \
                         in val)                                              \
   {                                                                          \
      return (out)(cast)Atomic_ReadOr ## size(var, (uint ## size)(cast)val);  \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadAdd ## name(Atomic_ ## name *var,                               \
                          in val)                                             \
   {                                                                          \
      return (out)(cast)Atomic_ReadAdd ## size(var, (uint ## size)(cast)val); \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadInc ## name(Atomic_ ## name *var)                               \
   {                                                                          \
      return (out)(cast)Atomic_ReadInc ## size(var);                          \
   }                                                                          \
                                                                              \
                                                                              \
   static INLINE out                                                          \
   Atomic_ReadDec ## name(Atomic_ ## name *var)                               \
   {                                                                          \
      return (out)(cast)Atomic_ReadDec ## size(var);                          \
   }


/*
 * Since we use a macro to generate these definitions, it is hard to look for
 * them. So DO NOT REMOVE THIS COMMENT and keep it up-to-date. --hpreg
 *
 * Atomic_Ptr
 * Atomic_ReadPtr --
 * Atomic_WritePtr --
 * Atomic_ReadWritePtr --
 * Atomic_ReadIfEqualWritePtr --
 * Atomic_AndPtr --
 * Atomic_OrPtr --
 * Atomic_XorPtr --
 * Atomic_AddPtr --
 * Atomic_SubPtr --
 * Atomic_IncPtr --
 * Atomic_DecPtr --
 * Atomic_ReadOrPtr --
 * Atomic_ReadAddPtr --
 * Atomic_ReadIncPtr --
 * Atomic_ReadDecPtr --
 *
 * Atomic_Int
 * Atomic_ReadInt --
 * Atomic_WriteInt --
 * Atomic_ReadWriteInt --
 * Atomic_ReadIfEqualWriteInt --
 * Atomic_AndInt --
 * Atomic_OrInt --
 * Atomic_XorInt --
 * Atomic_AddInt --
 * Atomic_SubInt --
 * Atomic_IncInt --
 * Atomic_DecInt --
 * Atomic_ReadOrInt --
 * Atomic_ReadAddInt --
 * Atomic_ReadIncInt --
 * Atomic_ReadDecInt --
 *
 * Atomic_Bool
 * Atomic_ReadBool --
 * Atomic_WriteBool --
 * Atomic_ReadWriteBool --
 * Atomic_ReadIfEqualWriteBool --
 * Atomic_AndBool --
 * Atomic_OrBool --
 * Atomic_XorBool --
 * Atomic_AddBool --
 * Atomic_SubBool --
 * Atomic_IncBool --
 * Atomic_DecBool --
 * Atomic_ReadOrBool --
 * Atomic_ReadAddBool --
 * Atomic_ReadIncBool --
 * Atomic_ReadDecBool --
 */
#if defined VM_64BIT
MAKE_ATOMIC_TYPE(Ptr, 64, void const *, void *, uintptr_t)
#else
MAKE_ATOMIC_TYPE(Ptr, 32, void const *, void *, uintptr_t)
#endif
MAKE_ATOMIC_TYPE(Int, 32, int, int, int)
MAKE_ATOMIC_TYPE(Bool, 8, Bool, Bool, Bool)

/*
 * Define arbitrary sized bit vector to be used by
 * Atomic_TestSetBitVector and Atomic_TestClearBitVector.
 */
#define ATOMIC_BITVECTOR(varName, capacity) \
      Atomic_uint8 varName[CEILING(capacity, 8)]


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_TestSetBitVector --
 *
 *      Atomically test and set the bit 'index' in bit vector var.
 *
 *      The index input value specifies which bit to modify and is 0-based.
 *
 * Results:
 *      Returns the value of the bit before modification.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_TestSetBitVector(Atomic_uint8 *var, // IN/OUT
                        unsigned index)    // IN
{
#if defined __x86_64__ && defined __GNUC__
   Bool bit;
   /* bts on a byte array addresses the bit directly; no /8 needed. */
   __asm__ __volatile__(
      "lock; bts %2, %1;"
      "setc %0"
      : "=qQm" (bit), "+m" (var->value)
      : "rI" (index)
      : "cc", "memory"
   );
   return bit;
#else
   uint8 bit = 1 << index % 8;
   return (Atomic_ReadOr8(var + index / 8, bit) & bit) != 0;
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_TestClearBitVector --
 *
 *      Atomically test and clear the bit 'index' in bit vector var.
 *
 *      The index input value specifies which bit to modify and is 0-based.
 *
 * Results:
 *      Returns the value of the bit before modification.
 *
 * Side effects:
 *      None
 *
 *-----------------------------------------------------------------------------
 */

static INLINE Bool
Atomic_TestClearBitVector(Atomic_uint8 *var, // IN/OUT
                          unsigned index)    // IN
{
#if defined __x86_64__ && defined __GNUC__
   Bool bit;
   __asm__ __volatile__(
      "lock; btr %2, %1;"
      "setc %0"
      : "=qQm" (bit), "+m" (var->value)
      : "rI" (index)
      : "cc", "memory"
   );
   return bit;
#else
   uint8 bit = 1 << index % 8;
   return (Atomic_ReadAnd8(var + index / 8, ~bit) & bit) != 0;
#endif
}


/*
 *-----------------------------------------------------------------------------
 *
 * Atomic_TestBitVector --
 *
 *      Test the bit 'index' (zero-based) in bit vector var.
*----------------------------------------------------------------------------- */ static INLINE Bool Atomic_TestBitVector(const Atomic_uint8 *var, // IN unsigned index) // IN { uint8 bit = 1 << index % 8; return (Atomic_Read8(var + index / 8) & bit) != 0; } #ifdef VM_ARM_64 # include "vm_atomic_arm64_end.h" #endif #if defined __cplusplus } // extern "C" #endif #endif // ifndef _ATOMIC_H_ vmhgfs-only/shared/vm_basic_types.h 0000444 0000000 0000000 00000070343 13432725350 016464 0 ustar root root /********************************************************* * Copyright (C) 1998-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * * vm_basic_types.h -- * * basic data types. 
*/ #ifndef _VM_BASIC_TYPES_H_ #define _VM_BASIC_TYPES_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" /* * Macros __i386__ and __ia64 are intrinsically defined by GCC */ #if defined _MSC_VER && defined _M_X64 # define __x86_64__ #elif defined _MSC_VER && defined _M_IX86 # define __i386__ #endif /* * Setup a bunch of defines for instruction set architecture (ISA) related * properties. * * For compiler types/size: * * - VM_32BIT for a 32-bit ISA (with the same C compiler types/sizes as 32-bit * x86/ARM). * - VM_64BIT for a 64-bit ISA (with the same C compiler types/sizes as 64-bit * x86/ARM). * * For a given <arch> in {X86, ARM}: * * - VM_<arch>_32 for the 32-bit variant. * - VM_<arch>_64 for the 64-bit variant. * - VM_<arch>_ANY for any variant of <arch>. * * VM_X86_ANY is synonymous with the confusing and deprecated VM_I386 (which * should really be VM_X86_32). */ #ifdef __i386__ /* * VM_I386 is historically synonymous with VM_X86_ANY in bora, but misleading, * since it is confused with the __i386__ gcc but defined for both 32- and * 64-bit x86. We retain it here for legacy compatibility. 
*/ #define VM_I386 #define VM_X86_32 #define VM_X86_ANY #define VM_32BIT #endif #ifdef __x86_64__ #define VM_X86_64 #define vm_x86_64 1 #define VM_I386 #define VM_X86_ANY #define VM_64BIT #else #define vm_x86_64 0 #endif #ifdef __arm__ #define VM_ARM_32 #define VM_ARM_ANY #define VM_32BIT #endif #ifdef __aarch64__ #define VM_ARM_64 #define vm_arm_64 1 #define VM_ARM_ANY #define VM_64BIT #else #define vm_arm_64 0 #endif #define vm_64bit (sizeof (void *) == 8) #ifdef _MSC_VER #pragma warning (3 :4505) // unreferenced local function #pragma warning (disable :4018) // signed/unsigned mismatch #pragma warning (disable :4761) // integral size mismatch in argument; conversion supplied #pragma warning (disable :4305) // truncation from 'const int' to 'short' #pragma warning (disable :4244) // conversion from 'unsigned short' to 'unsigned char' #pragma warning (disable :4267) // truncation of 'size_t' #pragma warning (disable :4146) // unary minus operator applied to unsigned type, result still unsigned #pragma warning (disable :4142) // benign redefinition of type #endif #if defined(__cplusplus) && __cplusplus >= 201103L || \ defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L || \ defined(__APPLE__) || defined(HAVE_STDINT_H) /* * We're using <stdint.h> instead of <cstdint> below because some C++ code * deliberately compiles without C++ include paths. */ #include <stdint.h> typedef uint64_t uint64; typedef int64_t int64; typedef uint32_t uint32; typedef int32_t int32; typedef uint16_t uint16; typedef int16_t int16; typedef uint8_t uint8; typedef int8_t int8; #else /* !HAVE_STDINT_H */ /* Pre-c99 or pre-c++11; use compiler extension to get 64-bit types */ #ifdef _MSC_VER typedef unsigned __int64 uint64; typedef signed __int64 int64; #elif __GNUC__ # if defined(VM_X86_64) || defined(VM_ARM_64) typedef unsigned long uint64; typedef long int64; # else /* * Only strict c90 (without extensions) lacks a 'long long' type. * If this declaration fails ... 
use -std=c99 or -std=gnu90. */ typedef unsigned long long uint64; typedef long long int64; # endif #else # error - Need compiler define for int64/uint64 #endif /* _MSC_VER */ typedef unsigned int uint32; typedef unsigned short uint16; typedef unsigned char uint8; typedef int int32; typedef short int16; typedef signed char int8; #endif /* HAVE_STDINT_H */ /* * The _XTYPEDEF_BOOL guard prevents colliding with: * <X11/Xlib.h> #define Bool int * <X11/Xdefs.h> typedef int Bool; * If using this header AND X11 headers, be sure to #undef Bool and * be careful about the different size. */ #if !defined(_XTYPEDEF_BOOL) #define _XTYPEDEF_BOOL /* * C does not specify whether char is signed or unsigned, and * both gcc and msvc implement it as a non-signed, non-unsigned type. * Thus, (uint8_t *)&Bool and (int8_t *)&Bool are possible compile errors. * This is intentional. */ typedef char Bool; #endif #ifndef FALSE #define FALSE 0 #endif #ifndef TRUE #define TRUE 1 #endif #define IS_BOOL(x) (((x) & ~1) == 0) /* * FreeBSD (for the tools build) unconditionally defines these in * sys/inttypes.h so don't redefine them if this file has already * been included. [greg] * * This applies to Solaris as well. 
*/ /* * Before trying to do the includes based on OS defines, see if we can use * feature-based defines to get as much functionality as possible */ #ifdef HAVE_INTTYPES_H #include <inttypes.h> #endif #ifdef HAVE_SYS_TYPES_H #include <sys/types.h> #endif #ifdef HAVE_SYS_INTTYPES_H #include <sys/inttypes.h> #endif #ifdef HAVE_STDLIB_H #include <stdlib.h> #endif #ifdef __FreeBSD__ #include <sys/param.h> /* For __FreeBSD_version */ #endif #if !defined(USING_AUTOCONF) # if defined(__FreeBSD__) || defined(sun) # ifdef KLD_MODULE # include <sys/types.h> # else # if __FreeBSD_version >= 500043 # if !defined(VMKERNEL) # include <inttypes.h> # endif # include <sys/types.h> # else # include <sys/inttypes.h> # endif # endif # elif defined __APPLE__ # if KERNEL # include <sys/unistd.h> # include <sys/types.h> /* mostly for size_t */ # include <stdint.h> # else # include <unistd.h> # include <inttypes.h> # include <stdlib.h> # include <stdint.h> # endif # elif defined __ANDROID__ # include <stdint.h> # else # if !defined(__intptr_t_defined) && !defined(intptr_t) # ifdef VM_I386 # define __intptr_t_defined # if defined(VM_X86_64) typedef int64 intptr_t; # else typedef int32 intptr_t; # endif # elif defined(VM_ARM_64) # define __intptr_t_defined typedef int64 intptr_t; # elif defined(__arm__) # define __intptr_t_defined typedef int32 intptr_t; # endif # endif # ifndef _STDINT_H # ifdef VM_I386 # if defined(VM_X86_64) typedef uint64 uintptr_t; # else typedef uint32 uintptr_t; # endif # elif defined(VM_ARM_64) typedef uint64 uintptr_t; # elif defined(__arm__) typedef uint32 uintptr_t; # endif # endif # endif #endif #if defined(__GNUC__) && defined(__SIZEOF_INT128__) typedef unsigned __int128 uint128; typedef __int128 int128; #define MIN_INT128 ((int128)1 << 127) #define MAX_INT128 (~MIN_INT128) #define MIN_UINT128 ((uint128)0) #define MAX_UINT128 (~MIN_UINT128) #endif /* * Time * XXX These should be cleaned up. 
-- edward */ typedef int64 VmTimeType; /* Time in microseconds */ typedef int64 VmTimeRealClock; /* Real clock kept in microseconds */ typedef int64 VmTimeVirtualClock; /* Virtual Clock kept in CPU cycles */ /* * Printf format specifiers for size_t and 64-bit number. * Use them like this: * printf("%" FMT64 "d\n", big); * The spaces are important for C++11 compatibility. * * FMTH is for handles/fds. */ #ifdef _MSC_VER /* MSVC added C99-compatible formatting in vs2015. */ #define FMT64 "I64" #ifdef VM_X86_64 #define FMTSZ "I64" #define FMTPD "I64" #define FMTH "I64" #else #define FMTSZ "I" #define FMTPD "I" #define FMTH "I" #endif #elif defined __APPLE__ /* macOS hosts use the same formatters for 32- and 64-bit. */ #define FMT64 "ll" #if KERNEL /* macOS osfmk/kern added 'z' length specifier in 10.13 */ #define FMTSZ "l" #else #define FMTSZ "z" #endif #define FMTPD "l" #define FMTH "" #elif defined __GNUC__ /* * Every POSIX system we target has C99-compatible printf * (supports 'z' for size_t and 'll' for long long). */ #define FMTH "" #define FMTSZ "z" #if defined(VM_X86_64) || defined(VM_ARM_64) #define FMT64 "l" #define FMTPD "l" #else #define FMT64 "ll" #define FMTPD "" #endif #else #error - Need compiler define for FMT64 and FMTSZ #endif /* * Suffix for 64-bit constants. Use it like this: * CONST64(0x7fffffffffffffff) for signed or * CONST64U(0x7fffffffffffffff) for unsigned. * * 2004.08.30(thutt): * The vmcore/asm64/gen* programs are compiled as 32-bit * applications, but must handle 64 bit constants. If the * 64-bit-constant defining macros are already defined, the * definition will not be overwritten. 
*/ #if !defined(CONST64) || !defined(CONST64U) #ifdef _MSC_VER #define CONST64(c) c##I64 #define CONST64U(c) c##uI64 #elif defined __APPLE__ #define CONST64(c) c##LL #define CONST64U(c) c##uLL #elif __GNUC__ #if defined(VM_X86_64) || defined(VM_ARM_64) #define CONST64(c) c##L #define CONST64U(c) c##uL #else #define CONST64(c) c##LL #define CONST64U(c) c##uLL #endif #else #error - Need compiler define for CONST64 #endif #endif /* * Use CONST3264/CONST3264U if you want a constant to be * treated as a 32-bit number on 32-bit compiles and * a 64-bit number on 64-bit compiles. Useful in the case * of shifts, like (CONST3264U(1) << x), where x could be * more than 31 on a 64-bit compile. */ #if defined(VM_X86_64) || defined(VM_ARM_64) #define CONST3264(a) CONST64(a) #define CONST3264U(a) CONST64U(a) #else #define CONST3264(a) (a) #define CONST3264U(a) (a) #endif #define MIN_INT8 ((int8)0x80) #define MAX_INT8 ((int8)0x7f) #define MIN_UINT8 ((uint8)0) #define MAX_UINT8 ((uint8)0xff) #define MIN_INT16 ((int16)0x8000) #define MAX_INT16 ((int16)0x7fff) #define MIN_UINT16 ((uint16)0) #define MAX_UINT16 ((uint16)0xffff) #define MIN_INT32 ((int32)0x80000000) #define MAX_INT32 ((int32)0x7fffffff) #define MIN_UINT32 ((uint32)0) #define MAX_UINT32 ((uint32)0xffffffff) #define MIN_INT64 (CONST64(0x8000000000000000)) #define MAX_INT64 (CONST64(0x7fffffffffffffff)) #define MIN_UINT64 (CONST64U(0)) #define MAX_UINT64 (CONST64U(0xffffffffffffffff)) typedef uint8 *TCA; /* Pointer into TC (usually). 
*/ /* * Type big enough to hold an integer between 0..100 */ typedef uint8 Percent; #define AsPercent(v) ((Percent)(v)) typedef uintptr_t VA; typedef uintptr_t VPN; typedef uint64 PA; typedef uint32 PPN; typedef uint64 TPA; typedef uint32 TPPN; typedef uint64 PhysMemOff; typedef uint64 PhysMemSize; typedef uint64 BA; #ifdef VMKERNEL typedef void *BPN; #else typedef uint64 BPN; #endif #define UINT64_2_BPN(u) ((BPN)(u)) #define BPN_2_UINT64(b) ((uint64)(b)) typedef uint32 PageCnt; typedef uint64 PgCnt64; typedef uint32 PageNum; typedef uint64 PgNum64; typedef unsigned MemHandle; typedef unsigned int IoHandle; typedef int32 World_ID; /* !! do not alter the definition of INVALID_WORLD_ID without ensuring * that the values defined in both bora/public/vm_basic_types.h and * lib/vprobe/vm_basic_types.h are the same. Additionally, the definition * of VMK_INVALID_WORLD_ID in vmkapi_world.h also must be defined with * the same value */ #define INVALID_WORLD_ID ((World_ID)0) typedef World_ID User_CartelID; #define INVALID_CARTEL_ID INVALID_WORLD_ID typedef User_CartelID User_SessionID; #define INVALID_SESSION_ID INVALID_CARTEL_ID typedef User_CartelID User_CartelGroupID; #define INVALID_CARTELGROUP_ID INVALID_CARTEL_ID typedef uint32 Worldlet_ID; #define INVALID_WORLDLET_ID ((Worldlet_ID)-1) typedef int8 Reg8; typedef int16 Reg16; typedef int32 Reg32; typedef int64 Reg64; typedef uint8 UReg8; typedef uint16 UReg16; typedef uint32 UReg32; typedef uint64 UReg64; #if defined(__GNUC__) && defined(__SIZEOF_INT128__) typedef int128 Reg128; typedef uint128 UReg128; #endif #if defined(VMM) || defined(COREQUERY) || defined(EXTDECODER) || \ defined (VMKERNEL) || defined (VMKBOOT) typedef Reg64 Reg; typedef UReg64 UReg; #endif typedef uint64 MA; typedef uint32 MPN32; /* * This type should be used for variables that contain sector * position/quantity. 
*/ typedef uint64 SectorType; /* * Linear address */ typedef uintptr_t LA; typedef uintptr_t LPN; #define LA_2_LPN(_la) ((_la) >> PAGE_SHIFT) #define LPN_2_LA(_lpn) ((_lpn) << PAGE_SHIFT) #define LAST_LPN ((((LA) 1) << (8 * sizeof(LA) - PAGE_SHIFT)) - 1) #define LAST_LPN32 ((((LA32)1) << (8 * sizeof(LA32) - PAGE_SHIFT)) - 1) #define LAST_LPN64 ((((LA64)1) << (8 * sizeof(LA64) - PAGE_SHIFT)) - 1) /* Valid bits in a LPN. */ #define LPN_MASK LAST_LPN #define LPN_MASK32 LAST_LPN32 #define LPN_MASK64 LAST_LPN64 /* * On 64 bit platform, address and page number types default * to 64 bit. When we need to represent a 32 bit address, we use * types defined below. * * On 32 bit platform, the following types are the same as the * default types. */ typedef uint32 VA32; typedef uint32 VPN32; typedef uint32 LA32; typedef uint32 LPN32; typedef uint32 PA32; typedef uint32 PPN32; /* * On 64 bit platform, the following types are the same as the * default types. */ typedef uint64 VA64; typedef uint64 VPN64; typedef uint64 LA64; typedef uint64 LPN64; typedef uint64 PA64; typedef uint64 PPN64; typedef uint64 TPPN64; typedef uint64 MA64; typedef uint64 MPN; /* * Remove after PPN->PPN64 conversion is finished. */ #define PPN64_2_PPN(_ppn) ((PPN)_ppn) #define FMTPPN "" /* * IO device DMA virtual address and page number (translated by IOMMU to * MA/MPN). IOPN can be in the inclusive range 0 -> MAX_IOPN. */ typedef uint64 IOA; typedef uint64 IOPN; /* * VA typedefs for user world apps. */ typedef VA32 UserVA32; typedef VA64 UserVA64; typedef UserVA64 UserVAConst; /* Userspace ptr to data that we may only read. */ typedef UserVA32 UserVA32Const; /* Userspace ptr to data that we may only read. */ typedef UserVA64 UserVA64Const; /* Used by 64-bit syscalls until conversion is finished. */ #ifdef VMKERNEL typedef UserVA64 UserVA; #else typedef void * UserVA; #endif #define MAX_PPN_BITS 31 #define MAX_PPN (((PPN)1 << MAX_PPN_BITS) - 1) /* Maximal observable PPN value. 
*/ #define INVALID_PPN ((PPN)0xffffffff) #define INVALID_PPN32 ((PPN32)0xffffffff) #define INVALID_PPN64 ((PPN64)0xffffffffffffffffull) #define APIC_INVALID_PPN ((PPN)0xfffffffe) #define INVALID_BPN ((BPN)0x000000ffffffffffull) #define MPN38_MASK ((1ull << 38) - 1) #define RESERVED_MPN ((MPN)0) #define INVALID_MPN ((MPN)MPN38_MASK) #define MEMREF_MPN ((MPN)MPN38_MASK - 1) #define RELEASED_MPN ((MPN)MPN38_MASK - 2) /* account for special MPNs defined above */ #define MAX_MPN ((MPN)MPN38_MASK - 3) /* 50 bits of address space */ #define INVALID_IOPN ((IOPN)-1) #define MAX_IOPN (INVALID_IOPN - 1) #define INVALID_LPN ((LPN)-1) #define INVALID_VPN ((VPN)-1) #define INVALID_LPN64 ((LPN64)-1) #define INVALID_PAGENUM ((PageNum)-1) /* * Format modifier for printing VA, LA, and VPN. * Use them like this: Log("%#" FMTLA "x\n", laddr) */ #if defined(VMM) || defined(FROBOS64) || vm_x86_64 || vm_arm_64 || defined __APPLE__ # define FMTLA "l" # define FMTVA "l" # define FMTVPN "l" #else # define FMTLA "" # define FMTVA "" # define FMTVPN "" #endif #ifndef EXTERN #define EXTERN extern #endif #define CONST const #ifndef INLINE # ifdef _MSC_VER /* * On UWP(Universal Windows Platform), * Only X86 32bit support '__inline' */ # if defined(VM_WIN_UWP) && !defined(VM_X86_32) # define INLINE # else # define INLINE __inline # endif # else # define INLINE inline # endif #endif /* * Annotation for data that may be exported into a DLL and used by other * apps that load that DLL and import the data. 
*/ #if defined(_WIN32) && defined(VMX86_IMPORT_DLLDATA) # define VMX86_EXTERN_DATA extern __declspec(dllimport) #else // !_WIN32 # define VMX86_EXTERN_DATA extern #endif #ifdef _WIN32 /* under windows, __declspec(thread) is supported since VS 2003 */ #define __thread __declspec(thread) #else /* * under other platforms instead, __thread is supported by gcc since * version 3.3.1 and by clang since version 3.x */ #endif /* * Due to the wonderful "registry redirection" feature introduced in * 64-bit Windows, if you access any key under HKLM\Software in 64-bit * code, you need to open/create/delete that key with * VMKEY_WOW64_32KEY if you want a consistent view with 32-bit code. */ #ifdef _WIN32 #ifdef _WIN64 #define VMW_KEY_WOW64_32KEY KEY_WOW64_32KEY #else #define VMW_KEY_WOW64_32KEY 0x0 #endif #endif /* * At present, we effectively require a compiler that is at least * gcc-3.3 (circa 2003). Enforce this here, various things below * this line depend upon it. * * In practice, most things presently compile with gcc-4.1 or gcc-4.4. * The various linux kernel modules may use older (gcc-3.3) compilers. */ #if defined __GNUC__ && (__GNUC__ < 3 || (__GNUC__ == 3 && __GNUC_MINOR__ < 3)) #error "gcc version is too old to compile assembly, need gcc-3.3 or better" #endif /* * Similarly, we require a compiler that is at least vc80 (vs2005). * Enforce this here. */ #if defined _MSC_VER && _MSC_VER < 1400 #error "cl.exe version is too old, need vc80 or better" #endif /* * Consider the following reasons functions are inlined: * * 1) inlined for performance reasons * 2) inlined because it's a single-use function * * Functions which meet only condition 2 should be marked with this * inline macro; It is not critical to be inlined (but there is a * code-space & runtime savings by doing so), so when other callers * are added the inline-ness should be removed. 
*/ #if defined __GNUC__ /* * Starting at version 3.3, gcc does not always inline functions marked * 'inline' (it depends on their size and other factors). To force gcc * to inline a function, one must use the __always_inline__ attribute. * This attribute should be used sparingly and with care. It is usually * preferable to let gcc make its own inlining decisions */ # define INLINE_ALWAYS INLINE __attribute__((__always_inline__)) #else # define INLINE_ALWAYS INLINE #endif #define INLINE_SINGLE_CALLER INLINE_ALWAYS /* * Used when a hard guarantee of no inlining is needed. Very few * instances need this since the absence of INLINE is a good hint * that gcc will not do inlining. */ #if defined(__GNUC__) #define ABSOLUTELY_NOINLINE __attribute__((__noinline__)) #elif defined(_MSC_VER) #define ABSOLUTELY_NOINLINE __declspec(noinline) #endif /* * Used when a function has no effects except the return value and the * return value depends only on the parameters and/or global variables * Such a function can be subject to common subexpression elimination * and loop optimization just as an arithmetic operator would be. */ #if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL)) #define SIDE_EFFECT_FREE __attribute__((__pure__)) #else #define SIDE_EFFECT_FREE #endif /* * Used when a function exmaines no input other than its arguments and * has no side effects other than its return value. Stronger than * SIDE_EFFECT_FREE as the function is not allowed to read from global * memory. */ #if defined(__GNUC__) && (defined(VMM) || defined (VMKERNEL)) #define CONST_FUNCTION __attribute__((__const__)) #else #define CONST_FUNCTION #endif /* * Attributes placed on function declarations to tell the compiler * that the function never returns. */ #ifdef _MSC_VER #define NORETURN __declspec(noreturn) #elif defined __GNUC__ #define NORETURN __attribute__((__noreturn__)) #else #define NORETURN #endif /* * Static profiling hints for functions. 
* A function can be either hot, cold, or neither. * It is an error to specify both hot and cold for the same function. * Note that there is no annotation for "neither." */ #if defined __GNUC__ && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) #define HOT __attribute__((hot)) #define COLD __attribute__((cold)) #else #define HOT #define COLD #endif /* * Branch prediction hints: * LIKELY(exp) - Expression exp is likely TRUE. * UNLIKELY(exp) - Expression exp is likely FALSE. * Usage example: * if (LIKELY(excCode == EXC_NONE)) { * or * if (UNLIKELY(REAL_MODE(vc))) { * * We know how to predict branches on gcc3 and later (hopefully), * all others we don't so we do nothing. */ #if defined __GNUC__ /* * gcc3 uses __builtin_expect() to inform the compiler of an expected value. * We use this to inform the static branch predictor. The '!!' in LIKELY * will convert any !=0 to a 1. */ #define LIKELY(_exp) __builtin_expect(!!(_exp), 1) #define UNLIKELY(_exp) __builtin_expect((_exp), 0) #else #define LIKELY(_exp) (_exp) #define UNLIKELY(_exp) (_exp) #endif /* * GCC's argument checking for printf-like functions * This is conditional until we have replaced all `"%x", void *' * with `"0x%08x", (uint32) void *'. Note that %p prints different things * on different platforms. Argument checking is enabled for the * vmkernel, which has already been cleansed. * * fmtPos is the position of the format string argument, beginning at 1 * varPos is the position of the variable argument, beginning at 1 */ #if defined(__GNUC__) # define PRINTF_DECL(fmtPos, varPos) __attribute__((__format__(__printf__, fmtPos, varPos))) #else # define PRINTF_DECL(fmtPos, varPos) #endif #if defined(__GNUC__) # define SCANF_DECL(fmtPos, varPos) __attribute__((__format__(__scanf__, fmtPos, varPos))) #else # define SCANF_DECL(fmtPos, varPos) #endif /* * UNUSED_PARAM should surround the parameter name and type declaration, * e.g. 
"int MyFunction(int var1, UNUSED_PARAM(int var2))" * */ #ifndef UNUSED_PARAM # if defined(__GNUC__) # define UNUSED_PARAM(_parm) _parm __attribute__((__unused__)) # elif defined _MSC_VER # define UNUSED_PARAM(_parm) __pragma(warning(suppress:4100)) _parm # else # define UNUSED_PARAM(_parm) _parm # endif #endif #ifndef UNUSED_TYPE // XXX _Pragma would better but doesn't always work right now. # define UNUSED_TYPE(_parm) UNUSED_PARAM(_parm) #endif #ifndef UNUSED_VARIABLE // XXX is there a better way? # define UNUSED_VARIABLE(_var) (void)_var #endif /* * gcc can warn us if we're ignoring returns */ #if defined(__GNUC__) # define MUST_CHECK_RETURN __attribute__((warn_unused_result)) #else # define MUST_CHECK_RETURN #endif /* * ALIGNED specifies minimum alignment in "n" bytes. */ #ifdef __GNUC__ #define ALIGNED(n) __attribute__((__aligned__(n))) #else #define ALIGNED(n) #endif /* * Encapsulate the syntactic differences between gcc and msvc alignment control. * BOUNDARY must match in the prefix and suffix. */ #ifdef _WIN32 #define ALIGN_PREFIX(BOUNDRY) __declspec(align(BOUNDRY)) #define ALIGN_SUFFIX(BOUNDRY) #else #define ALIGN_PREFIX(BOUNDRY) #define ALIGN_SUFFIX(BOUNDRY) __attribute__((__aligned__(BOUNDRY))) #endif /* * Once upon a time, this was used to silence compiler warnings that * get generated when the compiler thinks that a function returns * when it is marked noreturn. Don't do it. Use NOT_REACHED(). */ #define INFINITE_LOOP() do { } while (1) /* * On FreeBSD (for the tools build), size_t is typedef'd if _BSD_SIZE_T_ * is defined. Use the same logic here so we don't define it twice. 
[greg] */ #ifdef __FreeBSD__ # ifdef _BSD_SIZE_T_ # undef _BSD_SIZE_T_ # ifdef VM_I386 # ifdef VM_X86_64 typedef uint64 size_t; # else typedef uint32 size_t; # endif # endif /* VM_I386 */ # endif # ifdef _BSD_SSIZE_T_ # undef _BSD_SSIZE_T_ # ifdef VM_I386 # ifdef VM_X86_64 typedef int64 ssize_t; # else typedef int32 ssize_t; # endif # endif /* VM_I386 */ # endif #else # if !defined(_SIZE_T) && !defined(_SIZE_T_DEFINED) # ifdef VM_I386 # define _SIZE_T # ifdef VM_X86_64 typedef uint64 size_t; # else typedef uint32 size_t; # endif # elif defined(VM_ARM_64) # define _SIZE_T typedef uint64 size_t; # elif defined(__arm__) # define _SIZE_T typedef uint32 size_t; # endif # endif # if !defined(FROBOS) && !defined(_SSIZE_T) && !defined(_SSIZE_T_) && \ !defined(ssize_t) && !defined(__ssize_t_defined) && \ !defined(_SSIZE_T_DECLARED) && !defined(_SSIZE_T_DEFINED) && \ !defined(_SSIZE_T_DEFINED_) # ifdef VM_I386 # define _SSIZE_T # define __ssize_t_defined # define _SSIZE_T_DECLARED # define _SSIZE_T_DEFINED_ # ifdef VM_X86_64 typedef int64 ssize_t; # else typedef int32 ssize_t; # endif # elif defined(VM_ARM_64) # define _SSIZE_T # define __ssize_t_defined # define _SSIZE_T_DECLARED # define _SSIZE_T_DEFINED_ typedef int64 ssize_t; # elif defined(__arm__) # define _SSIZE_T # define __ssize_t_defined # define _SSIZE_T_DECLARED # define _SSIZE_T_DEFINED_ typedef int32 ssize_t; # endif # endif #endif /* * Format modifier for printing pid_t. On sun the pid_t is a ulong, but on * Linux it's an int. * Use this like this: printf("The pid is %" FMTPID ".\n", pid); */ #ifdef sun # ifdef VM_X86_64 # define FMTPID "d" # else # define FMTPID "lu" # endif #else # define FMTPID "d" #endif /* * Format modifier for printing uid_t. On Solaris 10 and earlier, uid_t * is a ulong, but on other platforms it's an unsigned int. 
* Use this like this: printf("The uid is %" FMTUID ".\n", uid); */ #if defined(sun) && !defined(SOL11) # ifdef VM_X86_64 # define FMTUID "u" # else # define FMTUID "lu" # endif #else # define FMTUID "u" #endif /* * Format modifier for printing mode_t. On sun the mode_t is a ulong, but on * Linux it's an int. * Use this like this: printf("The mode is %" FMTMODE ".\n", mode); */ #ifdef sun # ifdef VM_X86_64 # define FMTMODE "o" # else # define FMTMODE "lo" # endif #else # define FMTMODE "o" #endif #ifdef __APPLE__ /* * Format specifier for all these annoying types such as {S,U}Int32 * which are 'long' in 32-bit builds * and 'int' in 64-bit builds. */ # ifdef __LP64__ # define FMTLI "" # else # define FMTLI "l" # endif /* * Format specifier for all these annoying types such as NS[U]Integer * which are 'int' in 32-bit builds * and 'long' in 64-bit builds. */ # ifdef __LP64__ # define FMTIL "l" # else # define FMTIL "" # endif #endif /* * Define type for poll device handles. */ typedef int64 PollDevHandle; /* * Define the utf16_t type. */ #if defined(_WIN32) && defined(_NATIVE_WCHAR_T_DEFINED) typedef wchar_t utf16_t; #else typedef uint16 utf16_t; #endif /* * Define for point and rectangle types. Defined here so they * can be used by other externally facing headers in bora/public. */ typedef struct VMPoint { int x, y; } VMPoint; #if defined _WIN32 && defined USERLEVEL struct tagRECT; typedef struct tagRECT VMRect; #else typedef struct VMRect { int left; int top; int right; int bottom; } VMRect; #endif /* * ranked locks "everywhere" */ typedef uint32 MX_Rank; #endif /* _VM_BASIC_TYPES_H_ */ vmhgfs-only/shared/vmware_pack_begin.h 0000444 0000000 0000000 00000002451 13432725350 017113 0 ustar root root /********************************************************* * Copyright (C) 2002-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware_pack_begin.h -- * * Begin of structure packing. See vmware_pack_init.h for details. * * Note that we do not use the following construct in this include file, * because we want to emit the code every time the file is included --hpreg * * #ifndef foo * # define foo * ... * #endif * */ #include "vmware_pack_init.h" #ifdef _MSC_VER # pragma pack(push, 1) #elif __GNUC__ #else # error Compiler packing... #endif vmhgfs-only/shared/backdoor_types.h 0000444 0000000 0000000 00000006773 13432725350 016473 0 ustar root root /********************************************************* * Copyright (C) 1999-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoor_types.h -- * * Type definitions for backdoor interaction code. */ #ifndef _BACKDOOR_TYPES_H_ #define _BACKDOOR_TYPES_H_ #ifndef VM_I386 #error The backdoor protocol is only supported on x86 architectures. #endif /* * These #defines are intended for defining register structs as part of * existing named unions. If the union should encapsulate the register * (and nothing else), use DECLARE_REG_NAMED_STRUCT defined below. */ #define DECLARE_REG32_STRUCT \ struct { \ uint16 low; \ uint16 high; \ } halfs; \ uint32 word #define DECLARE_REG64_STRUCT \ DECLARE_REG32_STRUCT; \ struct { \ uint32 low; \ uint32 high; \ } words; \ uint64 quad #if defined (VM_X86_64) || defined (VM_ARM_64) #define DECLARE_REG_STRUCT DECLARE_REG64_STRUCT #else #define DECLARE_REG_STRUCT DECLARE_REG32_STRUCT #endif #define DECLARE_REG_NAMED_STRUCT(_r) \ union { DECLARE_REG_STRUCT; } _r /* * Some of the registers are expressed by semantic name, because if they were * expressed as register structs declared above, we could only address them * by fixed size (half-word, word, quad, etc.) instead of by varying size * (size_t, uintptr_t). * * To be cleaner, these registers are expressed ONLY by semantic name, * rather than by a union of the semantic name and a register struct. */ typedef union { struct { DECLARE_REG_NAMED_STRUCT(ax); size_t size; /* Register bx. 
*/ DECLARE_REG_NAMED_STRUCT(cx); DECLARE_REG_NAMED_STRUCT(dx); DECLARE_REG_NAMED_STRUCT(si); DECLARE_REG_NAMED_STRUCT(di); } in; struct { DECLARE_REG_NAMED_STRUCT(ax); DECLARE_REG_NAMED_STRUCT(bx); DECLARE_REG_NAMED_STRUCT(cx); DECLARE_REG_NAMED_STRUCT(dx); DECLARE_REG_NAMED_STRUCT(si); DECLARE_REG_NAMED_STRUCT(di); } out; } Backdoor_proto; typedef union { struct { DECLARE_REG_NAMED_STRUCT(ax); DECLARE_REG_NAMED_STRUCT(bx); size_t size; /* Register cx. */ DECLARE_REG_NAMED_STRUCT(dx); uintptr_t srcAddr; /* Register si. */ uintptr_t dstAddr; /* Register di. */ DECLARE_REG_NAMED_STRUCT(bp); } in; struct { DECLARE_REG_NAMED_STRUCT(ax); DECLARE_REG_NAMED_STRUCT(bx); DECLARE_REG_NAMED_STRUCT(cx); DECLARE_REG_NAMED_STRUCT(dx); DECLARE_REG_NAMED_STRUCT(si); DECLARE_REG_NAMED_STRUCT(di); DECLARE_REG_NAMED_STRUCT(bp); } out; } Backdoor_proto_hb; MY_ASSERTS(BACKDOOR_STRUCT_SIZES, ASSERT_ON_COMPILE(sizeof(Backdoor_proto) == 6 * sizeof(uintptr_t)); ASSERT_ON_COMPILE(sizeof(Backdoor_proto_hb) == 7 * sizeof(uintptr_t)); ) #undef DECLARE_REG_STRUCT #endif /* _BACKDOOR_TYPES_H_ */ vmhgfs-only/shared/compat_workqueue.h 0000444 0000000 0000000 00000014361 13432725347 017053 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_WORKQUEUE_H__ # define __COMPAT_WORKQUEUE_H__ #include <linux/kernel.h> #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) # include <linux/workqueue.h> #endif /* * * Work queues and delayed work queues. * * Prior to 2.5.41, the notion of work queues did not exist. Taskqueues are * used for work queues and timers are used for delayed work queues. * * After 2.6.20, normal work structs ("work_struct") and delayed work * ("delayed_work") structs were separated so that the work_struct could be * slimmed down. The interface was also changed such that the address of the * work_struct itself is passed in as the argument to the work function. This * requires that one embed the work struct in the larger struct containing the * information necessary to complete the work and use container_of() to obtain * the address of the containing structure. * * Users of these macros should embed a compat_work or compat_delayed_work in * a larger structure, then specify the larger structure as the _data argument * for the initialization functions, specify the work function to take * a compat_work_arg or compat_delayed_work_arg, then use the appropriate * _GET_DATA macro to obtain the reference to the structure passed in as _data. * An example is below. * * * typedef struct WorkData { * int data; * compat_work work; * } WorkData; * * * void * WorkFunc(compat_work_arg data) * { * WorkData *workData = COMPAT_WORK_GET_DATA(data, WorkData, work); * * ... 
* } * * * { * WorkData *workData = kmalloc(sizeof *workData, GFP_EXAMPLE); * if (!workData) { * return -ENOMEM; * } * * COMPAT_INIT_WORK(&workData->work, WorkFunc, workData); * compat_schedule_work(&workData->work); * } */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 41) /* { */ typedef struct tq_struct compat_work; typedef struct compat_delayed_work { struct tq_struct work; struct timer_list timer; } compat_delayed_work; typedef void * compat_work_arg; typedef void * compat_delayed_work_arg; /* * Delayed work queues need to run at some point in the future in process * context, but task queues don't support delaying the task one is scheduling. * Timers allow us to delay the execution of our work queue until the future, * but timer handlers run in bottom-half context. As such, we use both a timer * and task queue and use the timer handler below to schedule the task in * process context immediately. The timer lets us delay execution, and the * task queue lets us run in process context. * * Note that this is similar to how delayed_work is implemented with work * queues in later kernel versions. 
*/ static inline void __compat_delayed_work_timer(unsigned long arg) { compat_delayed_work *dwork = (compat_delayed_work *)arg; if (dwork) { schedule_task(&dwork->work); } } # define COMPAT_INIT_WORK(_work, _func, _data) \ INIT_LIST_HEAD(&(_work)->list); \ (_work)->sync = 0; \ (_work)->routine = _func; \ (_work)->data = _data # define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \ COMPAT_INIT_WORK(&(_work)->work, _func, _data); \ init_timer(&(_work)->timer); \ (_work)->timer.expires = 0; \ (_work)->timer.function = __compat_delayed_work_timer; \ (_work)->timer.data = (unsigned long)_work # define compat_schedule_work(_work) \ schedule_task(_work) # define compat_schedule_delayed_work(_work, _delay) \ (_work)->timer.expires = jiffies + _delay; \ add_timer(&(_work)->timer) # define COMPAT_WORK_GET_DATA(_p, _type, _member) \ (_type *)(_p) # define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \ (_type *)(_p) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) \ && !defined(__VMKLNX__) /* } { */ typedef struct work_struct compat_work; typedef struct work_struct compat_delayed_work; typedef void * compat_work_arg; typedef void * compat_delayed_work_arg; # define COMPAT_INIT_WORK(_work, _func, _data) \ INIT_WORK(_work, _func, _data) # define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \ INIT_WORK(_work, _func, _data) # define compat_schedule_work(_work) \ schedule_work(_work) # define compat_schedule_delayed_work(_work, _delay) \ schedule_delayed_work(_work, _delay) # define COMPAT_WORK_GET_DATA(_p, _type, _member) \ (_type *)(_p) # define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \ (_type *)(_p) #else /* } Linux >= 2.6.20 { */ typedef struct work_struct compat_work; typedef struct delayed_work compat_delayed_work; typedef struct work_struct * compat_work_arg; typedef struct work_struct * compat_delayed_work_arg; # define COMPAT_INIT_WORK(_work, _func, _data) \ INIT_WORK(_work, _func) # define COMPAT_INIT_DELAYED_WORK(_work, _func, _data) \ 
INIT_DELAYED_WORK(_work, _func) # define compat_schedule_work(_work) \ schedule_work(_work) # define compat_schedule_delayed_work(_work, _delay) \ schedule_delayed_work(_work, _delay) # define COMPAT_WORK_GET_DATA(_p, _type, _member) \ container_of(_p, _type, _member) # define COMPAT_DELAYED_WORK_GET_DATA(_p, _type, _member) \ container_of(_p, _type, _member.work) #endif /* } */ #endif /* __COMPAT_WORKQUEUE_H__ */ vmhgfs-only/shared/mul64.h 0000444 0000000 0000000 00000007233 13432725350 014422 0 ustar root root /********************************************************* * Copyright (C) 2003-2014,2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * mul64.h * * Integer by fixed point multiplication, with rounding. * * These routines are implemented in assembly language for most * supported platforms. This file has plain C fallback versions. 
*/ #ifndef _MUL64_H_ #define _MUL64_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_asm.h" #if defined __cplusplus extern "C" { #endif #ifdef MUL64_NO_ASM /* *----------------------------------------------------------------------------- * * Mul64x3264 -- * * Unsigned integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Unsigned 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Unsigned 64-bit integer product. * *----------------------------------------------------------------------------- */ static INLINE uint64 Mul64x3264(uint64 multiplicand, uint32 multiplier, uint32 shift) { uint64 lo, hi, lo2, hi2; unsigned carry; // ASSERT(shift >= 0 && shift < 64); lo = (multiplicand & 0xffffffff) * multiplier; hi = (multiplicand >> 32) * multiplier; lo2 = lo + (hi << 32); carry = lo2 < lo; hi2 = (hi >> 32) + carry; if (shift == 0) { return lo2; } else { return (lo2 >> shift) + (hi2 << (64 - shift)) + ((lo2 >> (shift - 1)) & 1); } } /* *----------------------------------------------------------------------------- * * Muls64x32s64 -- * * Signed integer by fixed point multiplication, with rounding: * result = floor(multiplicand * multiplier * 2**(-shift) + 0.5) * * Signed 64-bit integer multiplicand. * Unsigned 32-bit fixed point multiplier, represented as * (multiplier, shift), where shift < 64. * * Result: * Signed 64-bit integer product. 
* *----------------------------------------------------------------------------- */ static INLINE int64 Muls64x32s64(int64 multiplicand, uint32 multiplier, uint32 shift) { uint64 lo, hi, lo2, hi2; unsigned carry; // ASSERT(shift >= 0 && shift < 64); hi = ((uint64)multiplicand >> 32) * multiplier; if (multiplicand < 0) { hi -= (uint64)multiplier << 32; } lo = ((uint64)multiplicand & 0xffffffff) * multiplier; lo2 = lo + (hi << 32); carry = lo2 < lo; hi2 = (((int64)hi >> 32) + carry); if (shift == 0) { return lo2; } else { return (lo2 >> shift) + (hi2 << (64 - shift)) + ((lo2 >> (shift - 1)) & 1); } } #endif #if defined __cplusplus } // extern "C" #endif #endif // _MUL64_NOASM_H_ vmhgfs-only/shared/compat_sched.h 0000444 0000000 0000000 00000024236 13432725347 016114 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SCHED_H__ # define __COMPAT_SCHED_H__ #include <linux/sched.h> /* CLONE_KERNEL available in 2.5.35 and higher. */ #ifndef CLONE_KERNEL #define CLONE_KERNEL CLONE_FILES | CLONE_FS | CLONE_SIGHAND #endif /* TASK_COMM_LEN become available in 2.6.11. 
*/ #ifndef TASK_COMM_LEN #define TASK_COMM_LEN 16 #endif /* The capable() API appeared in 2.1.92 --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 1, 92) # define capable(_capability) suser() #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) # define need_resched() need_resched #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) # define need_resched() (current->need_resched) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 3) # define cond_resched() (need_resched() ? schedule() : (void) 0) #endif /* Oh well. We need yield... Happy us! */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 20) # ifdef __x86_64__ # define compat_yield() there_is_nothing_like_yield() # else # include <linux/unistd.h> # include <linux/kernel.h> /* * Used by _syscallX macros. Note that this is global variable, so * do not rely on its contents too much. As exit() is only function * we use, and we never check return value from exit(), we have * no problem... */ extern int errno; /* * compat_exit() provides an access to the exit() function. It must * be named compat_exit(), as exit() (with different signature) is * provided by x86-64, arm and other (but not by i386). */ # define __NR_compat_yield __NR_sched_yield static inline _syscall0(int, compat_yield); # endif #else # define compat_yield() yield() #endif /* * Since 2.5.34 there are two methods to enumerate tasks: * for_each_process(p) { ... } which enumerates only tasks and * do_each_thread(g,t) { ... } while_each_thread(g,t) which enumerates * also threads even if they share same pid. */ #ifndef for_each_process # define for_each_process(p) for_each_task(p) #endif #ifndef do_each_thread # define do_each_thread(g, t) for_each_task(g) { t = g; do # define while_each_thread(g, t) while (0) } #endif /* * Lock for signal mask is moving target... 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 40) && defined(CLONE_PID) /* 2.4.x without NPTL patches or early 2.5.x */ #define compat_sigmask_lock sigmask_lock #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 60) && !defined(INIT_SIGHAND) /* RedHat's 2.4.x with first version of NPTL support, or 2.5.40 to 2.5.59 */ #define compat_sigmask_lock sig->siglock #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #else /* RedHat's 2.4.x with second version of NPTL support, or 2.5.60+. */ #define compat_sigmask_lock sighand->siglock #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(¤t->blocked, (siginfo_ptr)) #else #define compat_dequeue_signal_current(siginfo_ptr) \ dequeue_signal(current, ¤t->blocked, (siginfo_ptr)) #endif #endif /* * recalc_sigpending() had task argument in the past */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 29) && defined(CLONE_PID) /* 2.4.x without NPTL patches or early 2.5.x */ #define compat_recalc_sigpending() recalc_sigpending(current) #else /* RedHat's 2.4.x with NPTL support, or 2.5.29+ */ #define compat_recalc_sigpending() recalc_sigpending() #endif /* * reparent_to_init() was introduced in 2.4.8. In 2.5.38 (or possibly * earlier, but later than 2.5.31) a call to it was added into * daemonize(), so compat_daemonize no longer needs to call it. * * In 2.4.x kernels reparent_to_init() forgets to do correct refcounting * on current->user. It is better to count one too many than one too few... */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 8) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 38) #define compat_reparent_to_init() do { \ reparent_to_init(); \ atomic_inc(¤t->user->__count); \ } while (0) #else #define compat_reparent_to_init() do {} while (0) #endif /* * daemonize appeared in 2.2.18. Except 2.2.17-4-RH7.0, which has it too. 
* Fortunately 2.2.17-4-RH7.0 uses versioned symbols, so we can check * its existence with defined(). */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) && !defined(daemonize) static inline void daemonize(void) { struct fs_struct *fs; exit_mm(current); current->session = 1; current->pgrp = 1; exit_fs(current); fs = init_task.fs; current->fs = fs; atomic_inc(&fs->count); } #endif /* * flush_signals acquires sighand->siglock since 2.5.61... Verify RH's kernels! */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_flush_signals(task) do { \ spin_lock_irq(&task->compat_sigmask_lock); \ flush_signals(task); \ spin_unlock_irq(&task->compat_sigmask_lock); \ } while (0) #else #define compat_flush_signals(task) flush_signals(task) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_allow_signal(signr) do { \ spin_lock_irq(¤t->compat_sigmask_lock); \ sigdelset(¤t->blocked, signr); \ compat_recalc_sigpending(); \ spin_unlock_irq(¤t->compat_sigmask_lock); \ } while (0) #else #define compat_allow_signal(signr) allow_signal(signr) #endif /* * daemonize can set process name since 2.5.61. Prior to 2.5.61, daemonize * didn't block signals on our behalf. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 61) #define compat_daemonize(x...) \ ({ \ /* Beware! No snprintf here, so verify arguments! */ \ sprintf(current->comm, x); \ \ /* Block all signals. */ \ spin_lock_irq(¤t->compat_sigmask_lock); \ sigfillset(¤t->blocked); \ compat_recalc_sigpending(); \ spin_unlock_irq(¤t->compat_sigmask_lock); \ compat_flush_signals(current); \ \ daemonize(); \ compat_reparent_to_init(); \ }) #else #define compat_daemonize(x...) daemonize(x) #endif /* * try to freeze a process. For kernels 2.6.11 or newer, we know how to choose * the interface. The problem is that the oldest interface, introduced in * 2.5.18, was backported to 2.4.x kernels. So if we're older than 2.6.11, * we'll decide what to do based on whether or not swsusp was configured * for the kernel. 
For kernels 2.6.20 and newer, we'll also need to include * freezer.h since the try_to_freeze definition was pulled out of sched.h. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) #include <linux/freezer.h> #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) || defined(VMW_TL10S64_WORKAROUND) #define compat_try_to_freeze() try_to_freeze() #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) #define compat_try_to_freeze() try_to_freeze(PF_FREEZE) #elif defined(CONFIG_SOFTWARE_SUSPEND) || defined(CONFIG_SOFTWARE_SUSPEND2) #include "compat_mm.h" #include <linux/errno.h> #include <linux/suspend.h> static inline int compat_try_to_freeze(void) { if (current->flags & PF_FREEZE) { refrigerator(PF_FREEZE); return 1; } else { return 0; } } #else static inline int compat_try_to_freeze(void) { return 0; } #endif /* * As of 2.6.23-rc1, kernel threads are no longer freezable by * default. Instead, kernel threads that need to be frozen must opt-in * by calling set_freezable() as soon as the thread is created. */ #if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 22) #define compat_set_freezable() do { set_freezable(); } while (0) #else #define compat_set_freezable() do {} while (0) #endif /* * Around 2.6.27 kernel stopped sending signals to kernel * threads being frozen, instead threads have to check * freezing() or use wait_event_freezable(). Unfortunately * wait_event_freezable() completely hides the fact that * thread was frozen from calling code and sometimes we do * want to know that. */ #ifdef PF_FREEZER_NOSIG #define compat_wait_check_freezing() freezing(current) #else #define compat_wait_check_freezing() (0) #endif /* * Since 2.6.27-rc2 kill_proc() is gone... Replacement (GPL-only!) * API is available since 2.6.19. Use them from 2.6.27-rc1 up. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) typedef int compat_pid; #define compat_find_get_pid(pid) (pid) #define compat_put_pid(pid) do { } while (0) #define compat_kill_pid(pid, sig, flag) kill_proc(pid, sig, flag) #else typedef struct pid * compat_pid; #define compat_find_get_pid(pid) find_get_pid(pid) #define compat_put_pid(pid) put_pid(pid) #define compat_kill_pid(pid, sig, flag) kill_pid(pid, sig, flag) #endif #endif /* __COMPAT_SCHED_H__ */ vmhgfs-only/shared/compat_page-flags.h 0000444 0000000 0000000 00000005037 13432725347 017032 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_PAGE_FLAGS_H__ # define __COMPAT_PAGE_FLAGS_H__ /* No page-flags.h prior to 2.5.12. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 12) # include <linux/page-flags.h> #endif /* * The pgoff_t type was introduced in 2.5.20, but we'll look for it by * definition since it's more convenient. Note that we want to avoid a * situation where, in the future, a #define is changed to a typedef, * so if pgoff_t is not defined in some future kernel, we won't define it. 
*/ #if !defined(pgoff_t) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) #define pgoff_t unsigned long #endif /* * set_page_writeback() was introduced in 2.6.6. Prior to that, callers were * using the SetPageWriteback() macro directly, so that's what we'll use. * Prior to 2.5.12, the writeback bit didn't exist, so we don't need to do * anything. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12) #define compat_set_page_writeback(page) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6) #define compat_set_page_writeback(page) SetPageWriteback(page) #else #define compat_set_page_writeback(page) set_page_writeback(page) #endif /* * end_page_writeback() was introduced in 2.5.12. Prior to that, it looks like * there was no page writeback bit, and everything the function accomplished * was done by unlock_page(), so we'll define it out. * * Note that we could just #define end_page_writeback to nothing and avoid * needing the compat_ prefix, but this is more complete with respect to * compat_set_page_writeback. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12) #define compat_end_page_writeback(page) #else #define compat_end_page_writeback(page) end_page_writeback(page) #endif #endif /* __COMPAT_PAGE_FLAGS_H__ */ vmhgfs-only/shared/compat_fs.h 0000444 0000000 0000000 00000024277 13432725347 015443 0 ustar root root /********************************************************* * Copyright (C) 2006 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_FS_H__ # define __COMPAT_FS_H__ #include <linux/fs.h> /* * 2.6.5+ kernels define FS_BINARY_MOUNTDATA. Since it didn't exist and * wasn't used prior, it's safe to define it to zero. */ #ifndef FS_BINARY_MOUNTDATA #define FS_BINARY_MOUNTDATA 0 #endif /* * MAX_LFS_FILESIZE wasn't defined until 2.5.4. */ #ifndef MAX_LFS_FILESIZE # include <linux/pagemap.h> # if BITS_PER_LONG == 32 # define MAX_LFS_FILESIZE (((u64)PAGE_CACHE_SIZE << (BITS_PER_LONG - 1)) - 1) # elif BITS_PER_LONG == 64 # define MAX_LFS_FILESIZE 0x7fffffffffffffffUL # endif #endif /* * sendfile as a VFS op was born in 2.5.30. Unfortunately, it also changed * signatures, first in 2.5.47, then again in 2.5.70, then again in 2.6.8. * Luckily, the 2.6.8+ signature is the same as the 2.5.47 signature. And * as of 2.6.23-rc1 sendfile is gone, replaced by splice_read... * * Let's not support sendfile from 2.5.30 to 2.5.47, because the 2.5.30 * signature is much different and file_send_actor isn't externed. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 23) #define VMW_SENDFILE_NONE #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 8) #define VMW_SENDFILE_NEW #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 70) #define VMW_SENDFILE_OLD #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 47) #define VMW_SENDFILE_NEW #else #define VMW_SENDFILE_NONE #endif /* * splice_read is there since 2.6.17, but let's avoid 2.6.17-rcX kernels... * After all nobody is using splice system call until 2.6.23 using it to * implement sendfile. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) #define VMW_SPLICE_READ 1 #endif /* * Filesystems wishing to use generic page cache read/write routines are * supposed to implement aio_read and aio_write (calling into * generic_file_aio_read() and generic_file_aio_write() if necessary). * * The VFS exports do_sync_read() and do_sync_write() as the "new" * generic_file_read() and generic_file_write(), but filesystems need not * actually implement read and write- the VFS will automatically call * do_sync_write() and do_sync_read() when applications invoke the standard * read() and write() system calls. * * In 2.6.19, generic_file_read() and generic_file_write() were removed, * necessitating this change. AIO dates as far back as 2.5.42, but the API has * changed over time, so for simplicity, we'll only enable it from 2.6.19 and * on. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) # define VMW_USE_AIO #endif /* * The alloc_inode and destroy_inode VFS ops didn't exist prior to 2.4.21. * Without these functions, file systems can't embed inodes. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 21) # define VMW_EMBED_INODE #endif /* * iget() was removed from the VFS as of 2.6.25-rc1. The replacement for iget() * is iget_locked() which was added in 2.5.17. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 17) # define VMW_USE_IGET_LOCKED #endif /* * parent_ino was born in 2.5.5. For older kernels, let's use 2.5.5 * implementation. It uses the dcache lock which is OK because per-dentry * locking appeared after 2.5.5. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 5) #define compat_parent_ino(dentry) parent_ino(dentry) #else #define compat_parent_ino(dentry) \ ({ \ ino_t res; \ spin_lock(&dcache_lock); \ res = dentry->d_parent->d_inode->i_ino; \ spin_unlock(&dcache_lock); \ res; \ }) #endif /* * putname changed to __putname in 2.6.6. 
*/ #define compat___getname() __getname() #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 6) #define compat___putname(name) putname(name) #else #define compat___putname(name) __putname(name) #endif /* * inc_nlink, drop_nlink, and clear_nlink were added in 2.6.19. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) #define compat_inc_nlink(inode) ((inode)->i_nlink++) #define compat_drop_nlink(inode) ((inode)->i_nlink--) #define compat_clear_nlink(inode) ((inode)->i_nlink = 0) #else #define compat_inc_nlink(inode) inc_nlink(inode) #define compat_drop_nlink(inode) drop_nlink(inode) #define compat_clear_nlink(inode) clear_nlink(inode) #endif /* * i_size_write and i_size_read were introduced in 2.6.0-test1 * (though we'll look for them as of 2.6.1). They employ slightly different * locking in order to guarantee atomicity, depending on the length of a long, * whether the kernel is SMP, or whether the kernel is preemptible. Prior to * i_size_write and i_size_read, there was no such locking, so that's the * behavior we'll emulate. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 1) #define compat_i_size_read(inode) ((inode)->i_size) #define compat_i_size_write(inode, size) ((inode)->i_size = size) #else #define compat_i_size_read(inode) i_size_read(inode) #define compat_i_size_write(inode, size) i_size_write(inode, size) #endif /* * filemap_fdatawrite was introduced in 2.5.12. Prior to that, modules used * filemap_fdatasync instead. In 2.4.18, both filemap_fdatawrite and * filemap_fdatawait began returning status codes. Prior to that, they were * void functions, so we'll just have them return 0. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 18) #define compat_filemap_fdatawrite(mapping) \ ({ \ int result = 0; \ filemap_fdatasync(mapping); \ result; \ }) #define compat_filemap_fdatawait(mapping) \ ({ \ int result = 0; \ filemap_fdatawait(mapping); \ result; \ }) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 12) #define compat_filemap_fdatawrite(mapping) filemap_fdatasync(mapping) #define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping) #else #define compat_filemap_fdatawrite(mapping) filemap_fdatawrite(mapping) #define compat_filemap_fdatawait(mapping) filemap_fdatawait(mapping) #endif /* * filemap_write_and_wait was introduced in 2.6.6 and exported for module use * in 2.6.16. It's really just a simple wrapper around filemap_fdatawrite and * and filemap_fdatawait, which initiates a flush of all dirty pages, then * waits for the pages to flush. The implementation here is a simplified form * of the one found in 2.6.20-rc3. * * Unfortunately, it just isn't possible to implement this prior to 2.4.5, when * neither filemap_fdatawait nor filemap_fdatasync were exported for module * use. So we'll define it out and hope for the best. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 5) #define compat_filemap_write_and_wait(mapping) #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 16) #define compat_filemap_write_and_wait(mapping) \ ({ \ int result = 0; \ if (mapping->nrpages) { \ result = compat_filemap_fdatawrite(mapping); \ if (result != -EIO) { \ int result2 = compat_filemap_fdatawait(mapping); \ if (!result) { \ result = result2; \ } \ } \ } \ result; \ }) #else #define compat_filemap_write_and_wait(mapping) filemap_write_and_wait(mapping) #endif /* * invalidate_remote_inode was introduced in 2.6.0-test5. Prior to that, * filesystems wishing to invalidate pages belonging to an inode called * invalidate_inode_pages. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) #define compat_invalidate_remote_inode(inode) invalidate_inode_pages(inode) #else #define compat_invalidate_remote_inode(inode) invalidate_remote_inode(inode) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) #define VMW_FSYNC_OLD #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0) typedef umode_t compat_umode_t; #else typedef int compat_umode_t; #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) #define d_make_root(inode) ({ \ struct dentry * ____res = d_alloc_root(inode); \ if (!____res) { \ iput(inode); \ } \ ____res; \ }) #endif #endif /* __COMPAT_FS_H__ */ vmhgfs-only/shared/driverLog.c 0000444 0000000 0000000 00000011117 13432725347 015405 0 ustar root root /********************************************************* * Copyright (C) 2007-2014 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * driverLog.c -- * * Common logging functions for Linux kernel modules. */ #include "driver-config.h" #include "compat_kernel.h" #include "compat_sched.h" #include <asm/current.h> #include "driverLog.h" #define LINUXLOG_BUFFER_SIZE 1024 static const char *driverLogPrefix = ""; /* * vsnprintf was born in 2.4.10. Fall back on vsprintf if we're * an older kernel. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 10) # define vsnprintf(str, size, fmt, args) vsprintf(str, fmt, args) #endif /* *---------------------------------------------------------------------------- * * DriverLog_Init -- * * Initializes the Linux logging. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------------- */ void DriverLog_Init(const char *prefix) // IN { driverLogPrefix = prefix ? prefix : ""; } /* *---------------------------------------------------------------------- * * DriverLogPrint -- * * Log error message from a Linux module. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ static void DriverLogPrint(const char *level, // IN: KERN_* constant const char *fmt, // IN: error format string va_list args) // IN: arguments for format string { static char staticBuf[LINUXLOG_BUFFER_SIZE]; char stackBuf[128]; va_list args2; const char *buf; /* * By default, use a small buffer on the stack (thread safe). If it is too * small, fall back to a larger static buffer (not thread safe). */ va_copy(args2, args); if (vsnprintf(stackBuf, sizeof stackBuf, fmt, args2) < sizeof stackBuf) { buf = stackBuf; } else { vsnprintf(staticBuf, sizeof staticBuf, fmt, args); buf = staticBuf; } va_end(args2); printk("%s%s[%d]: %s", level, driverLogPrefix, current->pid, buf); } /* *---------------------------------------------------------------------- * * Warning -- * * Warning messages from kernel module: logged into kernel log * as warnings. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void Warning(const char *fmt, ...) 
// IN: warning format string { va_list args; va_start(args, fmt); DriverLogPrint(KERN_WARNING, fmt, args); va_end(args); } /* *---------------------------------------------------------------------- * * Log -- * * Log messages from kernel module: logged into kernel log * as debug information. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void Log(const char *fmt, ...) // IN: log format string { va_list args; /* * Use the kernel log with at least a KERN_DEBUG level * so it doesn't garbage the screen at (re)boot time on RedHat 6.0. */ va_start(args, fmt); DriverLogPrint(KERN_DEBUG, fmt, args); va_end(args); } /* *---------------------------------------------------------------------- * * Panic -- * * ASSERTION failures and Panics from kernel module get here. * Message is logged to the kernel log and on console. * * Results: * None. * * Side effects: * Never returns * *---------------------------------------------------------------------- */ void Panic(const char *fmt, ...) // IN: panic format string { va_list args; va_start(args, fmt); DriverLogPrint(KERN_EMERG, fmt, args); va_end(args); #ifdef BUG BUG(); #else /* Should die with %cs unwritable, or at least with page fault. */ asm volatile("movb $0, %cs:(0)"); #endif while (1); } vmhgfs-only/shared/compat_autoconf.h 0000444 0000000 0000000 00000002641 13432725347 016640 0 ustar root root /********************************************************* * Copyright (C) 2009 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_AUTOCONF_H__ # define __COMPAT_AUTOCONF_H__ #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMKDRIVERS #include "includeCheck.h" #ifndef LINUX_VERSION_CODE # error "Include compat_version.h before compat_autoconf.h" #endif /* autoconf.h moved from linux/autoconf.h to generated/autoconf.h in 2.6.33-rc1. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 33) # include <linux/autoconf.h> #else # include <generated/autoconf.h> #endif #endif /* __COMPAT_AUTOCONF_H__ */ vmhgfs-only/shared/compat_skbuff.h 0000444 0000000 0000000 00000016131 13432725347 016301 0 ustar root root /********************************************************* * Copyright (C) 2007 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SKBUFF_H__ # define __COMPAT_SKBUFF_H__ #include <linux/skbuff.h> /* * When transition from mac/nh/h to skb_* accessors was made, also SKB_WITH_OVERHEAD * was introduced. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) || \ (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 21) && defined(SKB_WITH_OVERHEAD)) #define compat_skb_mac_header(skb) skb_mac_header(skb) #define compat_skb_network_header(skb) skb_network_header(skb) #define compat_skb_network_offset(skb) skb_network_offset(skb) #define compat_skb_transport_header(skb) skb_transport_header(skb) #define compat_skb_transport_offset(skb) skb_transport_offset(skb) #define compat_skb_network_header_len(skb) skb_network_header_len(skb) #define compat_skb_tail_pointer(skb) skb_tail_pointer(skb) #define compat_skb_end_pointer(skb) skb_end_pointer(skb) #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) # define compat_skb_ip_header(skb) ip_hdr(skb) # define compat_skb_ipv6_header(skb) ipv6_hdr(skb) # define compat_skb_tcp_header(skb) tcp_hdr(skb) #else # define compat_skb_ip_header(skb) ((struct iphdr *)skb_network_header(skb)) # define compat_skb_ipv6_header(skb) ((struct ipv6hdr *)skb_network_header(skb)) # define compat_skb_tcp_header(skb) ((struct tcphdr *)skb_transport_header(skb)) #endif #define compat_skb_reset_mac_header(skb) skb_reset_mac_header(skb) #define compat_skb_reset_network_header(skb) skb_reset_network_header(skb) #define compat_skb_reset_transport_header(skb) skb_reset_transport_header(skb) #define compat_skb_set_network_header(skb, off) skb_set_network_header(skb, off) #define compat_skb_set_transport_header(skb, off) skb_set_transport_header(skb, off) #else #define compat_skb_mac_header(skb) (skb)->mac.raw #define compat_skb_network_header(skb) (skb)->nh.raw #define compat_skb_network_offset(skb) ((skb)->nh.raw - (skb)->data) #define compat_skb_transport_header(skb) (skb)->h.raw #define compat_skb_transport_offset(skb) ((skb)->h.raw - (skb)->data) #define compat_skb_network_header_len(skb) ((skb)->h.raw - (skb)->nh.raw) #define compat_skb_tail_pointer(skb) (skb)->tail #define compat_skb_end_pointer(skb) (skb)->end #define compat_skb_ip_header(skb) (skb)->nh.iph 
#define compat_skb_ipv6_header(skb) (skb)->nh.ipv6h #define compat_skb_tcp_header(skb) (skb)->h.th #define compat_skb_reset_mac_header(skb) ((skb)->mac.raw = (skb)->data) #define compat_skb_reset_network_header(skb) ((skb)->nh.raw = (skb)->data) #define compat_skb_reset_transport_header(skb) ((skb)->h.raw = (skb)->data) #define compat_skb_set_network_header(skb, off) ((skb)->nh.raw = (skb)->data + (off)) #define compat_skb_set_transport_header(skb, off) ((skb)->h.raw = (skb)->data + (off)) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18) || defined(VMW_SKB_LINEARIZE_2618) # define compat_skb_linearize(skb) skb_linearize((skb)) #else # if LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 0) # define compat_skb_linearize(skb) __skb_linearize((skb), GFP_ATOMIC) # elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4) # define compat_skb_linearize(skb) skb_linearize((skb), GFP_ATOMIC) # else static inline int compat_skb_linearize(struct sk_buff *skb) { return 0; } # endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) #define compat_skb_csum_offset(skb) (skb)->csum_offset #else #define compat_skb_csum_offset(skb) (skb)->csum #endif /* * Note that compat_skb_csum_start() has semantic different from kernel's csum_start: * kernel's skb->csum_start is offset between start of checksummed area and start of * complete skb buffer, while our compat_skb_csum_start(skb) is offset from start * of packet itself. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 22) #define compat_skb_csum_start(skb) ((skb)->csum_start - skb_headroom(skb)) #else #define compat_skb_csum_start(skb) compat_skb_transport_offset(skb) #endif #if defined(NETIF_F_GSO) /* 2.6.18 and upwards */ #define compat_skb_mss(skb) (skb_shinfo(skb)->gso_size) #else #define compat_skb_mss(skb) (skb_shinfo(skb)->tso_size) #endif /* used by both received pkts and outgoing ones */ #define VM_CHECKSUM_UNNECESSARY CHECKSUM_UNNECESSARY /* csum status of received pkts */ #if defined(CHECKSUM_COMPLETE) # define VM_RX_CHECKSUM_PARTIAL CHECKSUM_COMPLETE #elif LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW) # define VM_RX_CHECKSUM_PARTIAL CHECKSUM_HW #else # define VM_RX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL #endif /* csum status of outgoing pkts */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19) && defined(CHECKSUM_HW) # define VM_TX_CHECKSUM_PARTIAL CHECKSUM_HW #else # define VM_TX_CHECKSUM_PARTIAL CHECKSUM_PARTIAL #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,1,0)) # define compat_kfree_skb(skb, type) kfree_skb(skb, type) # define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb, type) # define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb, type) # define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb, type) #else # define compat_kfree_skb(skb, type) kfree_skb(skb) # define compat_dev_kfree_skb(skb, type) dev_kfree_skb(skb) # if (LINUX_VERSION_CODE < KERNEL_VERSION(2,3,43)) # define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb(skb) # define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb(skb) # else # define compat_dev_kfree_skb_any(skb, type) dev_kfree_skb_any(skb) # define compat_dev_kfree_skb_irq(skb, type) dev_kfree_skb_irq(skb) # endif #endif #ifndef NET_IP_ALIGN # define COMPAT_NET_IP_ALIGN 2 #else # define COMPAT_NET_IP_ALIGN NET_IP_ALIGN #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 4, 4) # define compat_skb_headlen(skb) skb_headlen(skb) # define 
compat_pskb_may_pull(skb, len) pskb_may_pull(skb, len) # define compat_skb_is_nonlinear(skb) skb_is_nonlinear(skb) #else # define compat_skb_headlen(skb) (skb)->len # define compat_pskb_may_pull(skb, len) 1 # define compat_skb_is_nonlinear(skb) 0 #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 12) # define compat_skb_header_cloned(skb) skb_header_cloned(skb) #else # define compat_skb_header_cloned(skb) 0 #endif #endif /* __COMPAT_SKBUFF_H__ */ vmhgfs-only/shared/compat_semaphore.h 0000444 0000000 0000000 00000003142 13432725347 017002 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SEMAPHORE_H__ # define __COMPAT_SEMAPHORE_H__ /* <= 2.6.25 have asm only, 2.6.26 has both, and 2.6.27-rc2+ has linux only. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) # include <asm/semaphore.h> #else # include <linux/semaphore.h> #endif /* * The init_MUTEX_LOCKED() API appeared in 2.2.18, and is also in * 2.2.17-21mdk --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) #ifndef init_MUTEX_LOCKED #define init_MUTEX_LOCKED(_sem) *(_sem) = MUTEX_LOCKED #endif #ifndef DECLARE_MUTEX #define DECLARE_MUTEX(name) struct semaphore name = MUTEX #endif #ifndef DECLARE_MUTEX_LOCKED #define DECLARE_MUTEX_LOCKED(name) struct semaphore name = MUTEX_LOCKED #endif #endif #endif /* __COMPAT_SEMAPHORE_H__ */ vmhgfs-only/shared/vmware_pack_end.h 0000444 0000000 0000000 00000002475 13432725350 016603 0 ustar root root /********************************************************* * Copyright (C) 2002-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmware_pack_end.h -- * * End of structure packing. See vmware_pack_init.h for details. * * Note that we do not use the following construct in this include file, * because we want to emit the code every time the file is included --hpreg * * #ifndef foo * # define foo * ... * #endif * */ #include "vmware_pack_init.h" #ifdef _MSC_VER # pragma pack(pop) #elif __GNUC__ __attribute__((__packed__)) #else # error Compiler packing... 
#endif vmhgfs-only/shared/compat_uaccess.h 0000444 0000000 0000000 00000006062 13432725347 016451 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_UACCESS_H__ # define __COMPAT_UACCESS_H__ /* User space access functions moved in 2.1.7 to asm/uaccess.h --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 7) # include <asm/uaccess.h> #else # include <asm/segment.h> #endif /* get_user() API modified in 2.1.4 to take 2 arguments --hpreg */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 1, 4) # define compat_get_user get_user #else /* * We assign 0 to the variable in case of failure to prevent "`_var' might be * used uninitialized in this function" compiler warnings. 
I think it is OK, * because the hardware-based version in newer kernels probably has the same * semantics and does not guarantee that the value of _var will not be * modified, should the access fail --hpreg */ # define compat_get_user(_var, _uvAddr) ({ \ int _status; \ \ _status = verify_area(VERIFY_READ, _uvAddr, sizeof(*(_uvAddr))); \ if (_status == 0) { \ (_var) = get_user(_uvAddr); \ } else { \ (_var) = 0; \ } \ _status; \ }) #endif /* * The copy_from_user() API appeared in 2.1.4 * * The emulation is not perfect here, but it is conservative: on failure, we * always return the total size, instead of the potentially smaller faulty * size --hpreg * * Since 2.5.55 copy_from_user() is no longer macro. */ #if !defined(copy_from_user) && LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 0) # define copy_from_user(_to, _from, _size) ( \ verify_area(VERIFY_READ, _from, _size) \ ? (_size) \ : (memcpy_fromfs(_to, _from, _size), 0) \ ) # define copy_to_user(_to, _from, _size) ( \ verify_area(VERIFY_WRITE, _to, _size) \ ? (_size) \ : (memcpy_tofs(_to, _from, _size), 0) \ ) #endif #endif /* __COMPAT_UACCESS_H__ */ vmhgfs-only/shared/compat_spinlock.h 0000444 0000000 0000000 00000003377 13432725347 016653 0 ustar root root /********************************************************* * Copyright (C) 2005 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SPINLOCK_H__ # define __COMPAT_SPINLOCK_H__ #include <linux/spinlock.h> /* * Preempt support was added during 2.5.x development cycle, and later * it was backported to 2.4.x. In 2.4.x backport these definitions * live in linux/spinlock.h, that's why we put them here (in 2.6.x they * are defined in linux/preempt.h which is included by linux/spinlock.h). */ #ifdef CONFIG_PREEMPT #define compat_preempt_disable() preempt_disable() #define compat_preempt_enable() preempt_enable() #else #define compat_preempt_disable() do { } while (0) #define compat_preempt_enable() do { } while (0) #endif /* Some older kernels - 2.6.10 and earlier - lack DEFINE_SPINLOCK */ #ifndef DEFINE_SPINLOCK #define DEFINE_SPINLOCK(x) spinlock_t x = SPIN_LOCK_UNLOCKED #endif /* Same goes for DEFINE_RWLOCK */ #ifndef DEFINE_RWLOCK #define DEFINE_RWLOCK(x) rwlock_t x = RW_LOCK_UNLOCKED #endif #endif /* __COMPAT_SPINLOCK_H__ */ vmhgfs-only/shared/driverLog.h 0000444 0000000 0000000 00000002237 13432725347 015415 0 ustar root root /********************************************************* * Copyright (C) 2007-2014 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * driverLog.h -- * * Logging functions for Linux kernel modules. */ #ifndef __DRIVERLOG_H__ #define __DRIVERLOG_H__ /* * The definitions of Warning(), Log(), and Panic() come from vm_assert.h for * consistency. */ #include "vm_assert.h" void DriverLog_Init(const char *prefix); #endif /* __DRIVERLOG_H__ */ vmhgfs-only/shared/x86cpuid.h 0000644 0000000 0000000 00000327332 13432725350 015134 0 ustar root root /********************************************************* * Copyright (C) 1998-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _X86CPUID_H_ #define _X86CPUID_H_ /* http://www.sandpile.org/ia32/cpuid.htm */ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_VMX #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMMON #include "includeCheck.h" #include "vm_basic_types.h" #include "community_source.h" #include "x86vendor.h" #include "vm_assert.h" #if defined __cplusplus extern "C" { #endif /* * The linux kernel's ptrace.h stupidly defines the bare * EAX/EBX/ECX/EDX, which wrecks havoc with our preprocessor tricks. */ #undef EAX #undef EBX #undef ECX #undef EDX typedef struct CPUIDRegs { uint32 eax, ebx, ecx, edx; } CPUIDRegs; typedef union CPUIDRegsUnion { uint32 array[4]; CPUIDRegs regs; } CPUIDRegsUnion; /* * Results of calling cpuid(eax, ecx) on all host logical CPU. */ #ifdef _MSC_VER // TODO: Move this under the push #pragma warning (disable :4200) // non-std extension: zero-sized array in struct #pragma warning (push) #pragma warning (disable :4100) // unreferenced parameters #endif typedef #include "vmware_pack_begin.h" struct CPUIDReply { /* * Unique host logical CPU identifier. It does not change across queries, so * we use it to correlate the replies of multiple queries. */ uint64 tag; // OUT CPUIDRegs regs; // OUT } #include "vmware_pack_end.h" CPUIDReply; typedef #include "vmware_pack_begin.h" struct CPUIDQuery { uint32 eax; // IN uint32 ecx; // IN uint32 numLogicalCPUs; // IN/OUT CPUIDReply logicalCPUs[0]; // OUT } #include "vmware_pack_end.h" CPUIDQuery; /* * CPUID levels the monitor caches. 
* * The first parameter defines whether the level has its default masks * generated from the values in this file. Any level which is marked as FALSE * here *must* have all monitor support types set to NA. A static assert in * lib/cpuidcompat/cpuidcompat.c will check this. * * The second parameter is the "short name" of the level. It's mainly used for * token concatenation in various macros. * * The third parameter is the actual numeric value of that level (the EAX input * value). * * The fourth parameter is a "subleaf count", where 0 means that ecx is * ignored, otherwise is the count of sub-leaves. * * The fifth parameter is the first hardware version that is *aware* of the * CPUID level (0 = existed since dawn of time), even though we may not expose * this level or parts of it to guest. */ #define CPUID_CACHED_LEVELS \ CPUIDLEVEL(TRUE, 0, 0, 0, 0) \ CPUIDLEVEL(TRUE, 1, 1, 0, 0) \ CPUIDLEVEL(FALSE, 2, 2, 0, 0) \ CPUIDLEVEL(FALSE, 4, 4, 7, 0) \ CPUIDLEVEL(FALSE, 5, 5, 0, 0) \ CPUIDLEVEL(TRUE, 6, 6, 0, 0) \ CPUIDLEVEL(TRUE, 7, 7, 1, 0) \ CPUIDLEVEL(FALSE, A, 0xA, 0, 0) \ CPUIDLEVEL(FALSE, B, 0xB, 2, 0) \ CPUIDLEVEL(TRUE, D, 0xD, 10, 0) \ CPUIDLEVEL(TRUE, F, 0xF, 2, 13) \ CPUIDLEVEL(TRUE, 10, 0x10, 2, 13) \ CPUIDLEVEL(TRUE, 12, 0x12, 4, 13) \ CPUIDLEVEL(TRUE, 14, 0x14, 2, 13) \ CPUIDLEVEL(TRUE, 15, 0x15, 0, 13) \ CPUIDLEVEL(TRUE, 16, 0x16, 0, 13) \ CPUIDLEVEL(TRUE, 17, 0x17, 4, 14) \ CPUIDLEVEL(FALSE, 400, 0x40000000, 0, 0) \ CPUIDLEVEL(FALSE, 401, 0x40000001, 0, 0) \ CPUIDLEVEL(FALSE, 402, 0x40000002, 0, 0) \ CPUIDLEVEL(FALSE, 403, 0x40000003, 0, 0) \ CPUIDLEVEL(FALSE, 404, 0x40000004, 0, 0) \ CPUIDLEVEL(FALSE, 405, 0x40000005, 0, 0) \ CPUIDLEVEL(FALSE, 406, 0x40000006, 0, 0) \ CPUIDLEVEL(FALSE, 410, 0x40000010, 0, 0) \ CPUIDLEVEL(FALSE, 80, 0x80000000, 0, 0) \ CPUIDLEVEL(TRUE, 81, 0x80000001, 0, 0) \ CPUIDLEVEL(FALSE, 82, 0x80000002, 0, 0) \ CPUIDLEVEL(FALSE, 83, 0x80000003, 0, 0) \ CPUIDLEVEL(FALSE, 84, 0x80000004, 0, 0) \ CPUIDLEVEL(FALSE, 85, 0x80000005, 0, 0) \ 
CPUIDLEVEL(FALSE, 86, 0x80000006, 0, 0) \ CPUIDLEVEL(FALSE, 87, 0x80000007, 0, 0) \ CPUIDLEVEL(TRUE, 88, 0x80000008, 0, 0) \ CPUIDLEVEL(TRUE, 8A, 0x8000000A, 0, 0) \ CPUIDLEVEL(FALSE, 819, 0x80000019, 0, 0) \ CPUIDLEVEL(FALSE, 81A, 0x8000001A, 0, 0) \ CPUIDLEVEL(FALSE, 81B, 0x8000001B, 0, 0) \ CPUIDLEVEL(FALSE, 81C, 0x8000001C, 0, 0) \ CPUIDLEVEL(FALSE, 81D, 0x8000001D, 5, 0) \ CPUIDLEVEL(FALSE, 81E, 0x8000001E, 0, 0) \ CPUIDLEVEL(TRUE, 81F, 0x8000001F, 0, 14) #define CPUID_ALL_LEVELS CPUID_CACHED_LEVELS /* Define cached CPUID levels in the form: CPUID_LEVEL_<ShortName> */ typedef enum { #define CPUIDLEVEL(t, s, v, c, h) CPUID_LEVEL_##s, CPUID_CACHED_LEVELS #undef CPUIDLEVEL CPUID_NUM_CACHED_LEVELS } CpuidCachedLevel; /* Enum to translate between shorthand name and actual CPUID level value. */ enum { #define CPUIDLEVEL(t, s, v, c, h) CPUID_LEVEL_VAL_##s = v, CPUID_ALL_LEVELS #undef CPUIDLEVEL }; /* Named feature leaves */ #define CPUID_FEATURE_INFORMATION 0x01 #define CPUID_PROCESSOR_TOPOLOGY 4 #define CPUID_MWAIT_FEATURES 5 #define CPUID_XSAVE_FEATURES 0xd #define CPUID_SGX_FEATURES 0x12 #define CPUID_PT_FEATURES 0x14 #define CPUID_HYPERVISOR_LEVEL_0 0x40000000 #define CPUID_SVM_FEATURES 0x8000000a /* * CPUID result registers */ #define CPUID_REGS \ CPUIDREG(EAX, eax) \ CPUIDREG(EBX, ebx) \ CPUIDREG(ECX, ecx) \ CPUIDREG(EDX, edx) typedef enum { #define CPUIDREG(uc, lc) CPUID_REG_##uc, CPUID_REGS #undef CPUIDREG CPUID_NUM_REGS } CpuidReg; #define CPUID_INTEL_VENDOR_STRING "GenuntelineI" #define CPUID_AMD_VENDOR_STRING "AuthcAMDenti" #define CPUID_CYRIX_VENDOR_STRING "CyriteadxIns" #define CPUID_VIA_VENDOR_STRING "CentaulsaurH" #define CPUID_HYGON_VENDOR_STRING "HygouinenGen" #define CPUID_HYPERV_HYPERVISOR_VENDOR_STRING "Microsoft Hv" #define CPUID_KVM_HYPERVISOR_VENDOR_STRING "KVMKVMKVM\0\0\0" #define CPUID_VMWARE_HYPERVISOR_VENDOR_STRING "VMwareVMware" #define CPUID_XEN_HYPERVISOR_VENDOR_STRING "XenVMMXenVMM" #define CPUID_INTEL_VENDOR_STRING_FIXED "GenuineIntel" 
#define CPUID_AMD_VENDOR_STRING_FIXED "AuthenticAMD" #define CPUID_CYRIX_VENDOR_STRING_FIXED "CyrixInstead" #define CPUID_VIA_VENDOR_STRING_FIXED "CentaurHauls" #define CPUID_HYGON_VENDOR_STRING_FIXED "HygonGenuine" /* * FIELD can be defined to process the CPUID information provided in the * following CPUID_FIELD_DATA macro. * * The first parameter is the CPUID level of the feature (must be defined in * CPUID_ALL_LEVELS, above). * * The second parameter is the CPUID sub-level (subleaf) of the feature. Please * make sure here the number is consistent with the "subleaf count" in * CPUIDLEVEL macro. I.e., if a feature is being added to a _new_ subleaf, * update the subleaf count above as well. * * The third parameter is the result register. * * The fourth and fifth parameters are the bit position of the field and the * width, respectively. * * The sixth is the name of the field. * * The seventh parameter specifies the monitor support characteristics for * this field. The value must be a valid CpuidFieldSupported value (omitting * CPUID_FIELD_SUPPORT_ for convenience). The meaning of those values are * described below. * * The eighth parameter specifies the first virtual hardware version that * implements the field (if 7th field is YES or ANY), or 0 (if 7th field is * NO or NA). The field's hardware version must match the version in * defaultMasks (cpuidcompat.c) if defined there, and must be less than or * equal to the version of the cpuid leaf it's in. * * The ninth parameter describes whether the feature is capable of being used * by usermode code (TRUE), or just CPL0 kernel code (FALSE). * * FLAG is defined identically to FIELD, but its accessors are more appropriate * for 1-bit flags, and compile-time asserts enforce that the size is 1 bit * wide. */ /* * CpuidFieldSupported is made up of the following values: * * NO: A feature/field that IS NOT SUPPORTED by the monitor. Even * if the host supports this feature, we will never expose it to * the guest. 
* * YES: A feature/field that IS SUPPORTED by the monitor. If the * host supports this feature, we will expose it to the guest. If * not, then we will not set the feature. * * ANY: A feature/field that IS ALWAYS SUPPORTED by the monitor. * Even if the host does not support the feature, the monitor can * expose the feature to the guest. As with "YES", the guest cpuid * value defaults to the host/evc cpuid value. But usually the * guest cpuid value is recomputed at power on, ignoring the default * value. * * * NA: Only legal for levels not masked/tested by default (see * above for this definition). Such fields must always be marked * as NA. * * These distinctions, when combined with the feature's CPL3 * properties can be translated into a common CPUID mask string as * follows: * * NO + CPL3 --> "R" (Reserved). We don't support the feature, * but we can't properly hide this from applications when using * direct execution or HV with apps that do try/catch/fail, so we * must still perform compatibility checks. * * NO + !CPL3 --> "0" (Masked). We can hide this from the guest. * * YES --> "H" (Host). We support the feature, so show it to the * guest if the host has the feature. * * ANY/NA --> "X" (Ignore). By default, don't perform checks for * this feature bit. Per-GOS masks may choose to set this bit in * the guest. (e.g. the APIC feature bit is always set to 1.) * * See lib/cpuidcompat/cpuidcompat.c for any possible overrides to * these defaults. 
*/ typedef enum { CPUID_FIELD_SUPPORTED_NO, CPUID_FIELD_SUPPORTED_YES, CPUID_FIELD_SUPPORTED_ANY, CPUID_FIELD_SUPPORTED_NA, CPUID_NUM_FIELD_SUPPORTEDS } CpuidFieldSupported; /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_0 \ FIELD( 0, 0, EAX, 0, 32, NUMLEVELS, ANY, 4, FALSE) \ FIELD( 0, 0, EBX, 0, 32, VENDOR1, YES, 4, TRUE) \ FIELD( 0, 0, ECX, 0, 32, VENDOR3, YES, 4, TRUE) \ FIELD( 0, 0, EDX, 0, 32, VENDOR2, YES, 4, TRUE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_1 \ FIELD( 1, 0, EAX, 0, 4, STEPPING, ANY, 4, FALSE) \ FIELD( 1, 0, EAX, 4, 4, MODEL, ANY, 4, FALSE) \ FIELD( 1, 0, EAX, 8, 4, FAMILY, YES, 4, FALSE) \ FIELD( 1, 0, EAX, 12, 2, TYPE, ANY, 4, FALSE) \ FIELD( 1, 0, EAX, 16, 4, EXTENDED_MODEL, ANY, 4, FALSE) \ FIELD( 1, 0, EAX, 20, 8, EXTENDED_FAMILY, YES, 4, FALSE) \ FIELD( 1, 0, EBX, 0, 8, BRAND_ID, ANY, 4, FALSE) \ FIELD( 1, 0, EBX, 8, 8, CLFL_SIZE, ANY, 4, FALSE) \ FIELD( 1, 0, EBX, 16, 8, LCPU_COUNT, ANY, 4, FALSE) \ FIELD( 1, 0, EBX, 24, 8, APICID, ANY, 4, FALSE) \ FLAG( 1, 0, ECX, 0, 1, SSE3, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 1, 1, PCLMULQDQ, YES, 7, TRUE) \ FLAG( 1, 0, ECX, 2, 1, DTES64, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 3, 1, MWAIT, YES, 4, FALSE) \ FLAG( 1, 0, ECX, 4, 1, DSCPL, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 5, 1, VMX, YES, 4, FALSE) \ FLAG( 1, 0, ECX, 6, 1, SMX, YES, 15, FALSE) \ FLAG( 1, 0, ECX, 7, 1, EIST, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 8, 1, TM2, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 9, 1, SSSE3, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 10, 1, CNXTID, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 11, 1, SDBG, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 12, 1, FMA, YES, 8, TRUE) \ FLAG( 1, 0, ECX, 13, 1, CMPXCHG16B, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 14, 1, xTPR, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 15, 1, PDCM, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 17, 1, PCID, YES, 8, FALSE) \ FLAG( 1, 0, ECX, 18, 1, DCA, NO, 0, FALSE) \ FLAG( 1, 0, ECX, 19, 1, SSE41, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 
20, 1, SSE42, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 21, 1, x2APIC, ANY, 9, FALSE) \ FLAG( 1, 0, ECX, 22, 1, MOVBE, YES, 7, TRUE) \ FLAG( 1, 0, ECX, 23, 1, POPCNT, YES, 4, TRUE) \ FLAG( 1, 0, ECX, 24, 1, TSC_DEADLINE, ANY, 11, FALSE) \ FLAG( 1, 0, ECX, 25, 1, AES, YES, 7, TRUE) \ FLAG( 1, 0, ECX, 26, 1, XSAVE, YES, 8, FALSE) \ FLAG( 1, 0, ECX, 27, 1, OSXSAVE, ANY, 8, FALSE) \ FLAG( 1, 0, ECX, 28, 1, AVX, YES, 8, FALSE) \ FLAG( 1, 0, ECX, 29, 1, F16C, YES, 9, TRUE) \ FLAG( 1, 0, ECX, 30, 1, RDRAND, YES, 9, TRUE) \ FLAG( 1, 0, ECX, 31, 1, HYPERVISOR, ANY, 4, TRUE) \ FLAG( 1, 0, EDX, 0, 1, FPU, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 1, 1, VME, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 2, 1, DE, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 3, 1, PSE, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 4, 1, TSC, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 5, 1, MSR, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 6, 1, PAE, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 7, 1, MCE, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 8, 1, CX8, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 9, 1, APIC, ANY, 4, FALSE) \ FLAG( 1, 0, EDX, 11, 1, SEP, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 12, 1, MTRR, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 13, 1, PGE, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 14, 1, MCA, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 15, 1, CMOV, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 16, 1, PAT, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 17, 1, PSE36, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 18, 1, PSN, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 19, 1, CLFSH, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 21, 1, DS, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 22, 1, ACPI, ANY, 4, FALSE) \ FLAG( 1, 0, EDX, 23, 1, MMX, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 24, 1, FXSR, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 25, 1, SSE, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 26, 1, SSE2, YES, 4, TRUE) \ FLAG( 1, 0, EDX, 27, 1, SS, YES, 4, FALSE) \ FLAG( 1, 0, EDX, 28, 1, HTT, ANY, 7, FALSE) \ FLAG( 1, 0, EDX, 29, 1, TM, NO, 0, FALSE) \ FLAG( 1, 0, EDX, 30, 1, IA64, NO, 0, FALSE) \ FLAG( 1, 0, EDX, 31, 1, PBE, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ 
#define CPUID_FIELD_DATA_LEVEL_2 \ FIELD( 2, 0, EAX, 0, 8, LEAF2_COUNT, NA, 0, FALSE) \ FIELD( 2, 0, EAX, 8, 8, LEAF2_CACHE1, NA, 0, FALSE) \ FIELD( 2, 0, EAX, 16, 8, LEAF2_CACHE2, NA, 0, FALSE) \ FIELD( 2, 0, EAX, 24, 8, LEAF2_CACHE3, NA, 0, FALSE) \ FIELD( 2, 0, EBX, 0, 8, LEAF2_CACHE4, NA, 0, FALSE) \ FIELD( 2, 0, EBX, 8, 8, LEAF2_CACHE5, NA, 0, FALSE) \ FIELD( 2, 0, EBX, 16, 8, LEAF2_CACHE6, NA, 0, FALSE) \ FIELD( 2, 0, EBX, 24, 8, LEAF2_CACHE7, NA, 0, FALSE) \ FIELD( 2, 0, ECX, 0, 8, LEAF2_CACHE8, NA, 0, FALSE) \ FIELD( 2, 0, ECX, 8, 8, LEAF2_CACHE9, NA, 0, FALSE) \ FIELD( 2, 0, ECX, 16, 8, LEAF2_CACHE10, NA, 0, FALSE) \ FIELD( 2, 0, ECX, 24, 8, LEAF2_CACHE11, NA, 0, FALSE) \ FIELD( 2, 0, EDX, 0, 8, LEAF2_CACHE12, NA, 0, FALSE) \ FIELD( 2, 0, EDX, 8, 8, LEAF2_CACHE13, NA, 0, FALSE) \ FIELD( 2, 0, EDX, 16, 8, LEAF2_CACHE14, NA, 0, FALSE) \ FIELD( 2, 0, EDX, 24, 8, LEAF2_CACHE15, NA, 0, FALSE) \ /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_4 \ FIELD( 4, 0, EAX, 0, 5, LEAF4_CACHE_TYPE, NA, 0, FALSE) \ FIELD( 4, 0, EAX, 5, 3, LEAF4_CACHE_LEVEL, NA, 0, FALSE) \ FLAG( 4, 0, EAX, 8, 1, LEAF4_CACHE_SELF_INIT, NA, 0, FALSE) \ FLAG( 4, 0, EAX, 9, 1, LEAF4_CACHE_FULLY_ASSOC, NA, 0, FALSE) \ FIELD( 4, 0, EAX, 14, 12, LEAF4_CACHE_NUMHT_SHARING, NA, 0, FALSE) \ FIELD( 4, 0, EAX, 26, 6, LEAF4_CORE_COUNT, NA, 0, FALSE) \ FIELD( 4, 0, EBX, 0, 12, LEAF4_CACHE_LINE, NA, 0, FALSE) \ FIELD( 4, 0, EBX, 12, 10, LEAF4_CACHE_PART, NA, 0, FALSE) \ FIELD( 4, 0, EBX, 22, 10, LEAF4_CACHE_WAYS, NA, 0, FALSE) \ FIELD( 4, 0, ECX, 0, 32, LEAF4_CACHE_SETS, NA, 0, FALSE) \ FLAG( 4, 0, EDX, 0, 1, LEAF4_CACHE_WBINVD_NOT_GUARANTEED, NA, 0, FALSE) \ FLAG( 4, 0, EDX, 1, 1, LEAF4_CACHE_IS_INCLUSIVE, NA, 0, FALSE) \ FLAG( 4, 0, EDX, 2, 1, LEAF4_CACHE_COMPLEX_INDEXING, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_5 \ FIELD( 5, 0, EAX, 0, 16, MWAIT_MIN_SIZE, NA, 0, FALSE) \ FIELD( 5, 0, 
EBX, 0, 16, MWAIT_MAX_SIZE, NA, 0, FALSE) \ FLAG( 5, 0, ECX, 0, 1, MWAIT_EXTENSIONS, NA, 0, FALSE) \ FLAG( 5, 0, ECX, 1, 1, MWAIT_INTR_BREAK, NA, 0, FALSE) \ FIELD( 5, 0, EDX, 0, 4, MWAIT_C0_SUBSTATE, NA, 0, FALSE) \ FIELD( 5, 0, EDX, 4, 4, MWAIT_C1_SUBSTATE, NA, 0, FALSE) \ FIELD( 5, 0, EDX, 8, 4, MWAIT_C2_SUBSTATE, NA, 0, FALSE) \ FIELD( 5, 0, EDX, 12, 4, MWAIT_C3_SUBSTATE, NA, 0, FALSE) \ FIELD( 5, 0, EDX, 16, 4, MWAIT_C4_SUBSTATE, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_6 \ FLAG( 6, 0, EAX, 0, 1, THERMAL_SENSOR, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 1, 1, TURBO_MODE, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 2, 1, APIC_INVARIANT, ANY, 13, FALSE) \ FLAG( 6, 0, EAX, 4, 1, PLN, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 5, 1, ECMD, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 6, 1, PTM, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 7, 1, HWP, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 8, 1, HWP_NOTIFICATION, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 9, 1, HWP_ACTIVITY_WINDOW, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 10, 1, HWP_ENERGY_PERFORMANCE_PREFERENCE, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 11, 1, HWP_PACKAGE_LEVEL_REQUEST, NO, 0, FALSE) \ FLAG( 6, 0, EAX, 13, 1, HDC, NO, 0, FALSE) \ FIELD( 6, 0, EBX, 0, 4, NUM_INTR_THRESHOLDS, NO, 0, FALSE) \ FLAG( 6, 0, ECX, 0, 1, HW_COORD_FEEDBACK, NO, 0, FALSE) \ FLAG( 6, 0, ECX, 1, 1, ACNT2, ANY, 13, FALSE) \ FLAG( 6, 0, ECX, 3, 1, ENERGY_PERF_BIAS, NO, 0, FALSE) #define CPUID_7_EDX_28 \ FLAG( 7, 0, EDX, 28, 1, LEVEL7EDX_RSVD1, NO, 0, FALSE) #define CPUID_7_EDX_31 \ FLAG( 7, 0, EDX, 31, 1, LEVEL7EDX_RSVD2, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_7 \ FLAG( 7, 0, EBX, 0, 1, FSGSBASE, YES, 9, FALSE) \ FLAG( 7, 0, EBX, 1, 1, TSC_ADJUST, ANY, 11, FALSE) \ FLAG( 7, 0, EBX, 2, 1, SGX, YES, 16, FALSE) \ FLAG( 7, 0, EBX, 3, 1, BMI1, YES, 9, TRUE) \ FLAG( 7, 0, EBX, 4, 1, HLE, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 5, 1, AVX2, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 6, 1, 
FDP_EXCPTN_ONLY, ANY, 13, TRUE) \ FLAG( 7, 0, EBX, 7, 1, SMEP, YES, 9, FALSE) \ FLAG( 7, 0, EBX, 8, 1, BMI2, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 9, 1, ENFSTRG, YES, 9, FALSE) \ FLAG( 7, 0, EBX, 10, 1, INVPCID, YES, 11, FALSE) \ FLAG( 7, 0, EBX, 11, 1, RTM, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 12, 1, PQM, NO, 0, FALSE) \ FLAG( 7, 0, EBX, 13, 1, FP_SEGMENT_ZERO, ANY, 11, TRUE) \ FLAG( 7, 0, EBX, 14, 1, MPX, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 15, 1, PQE, NO, 0, FALSE) \ FLAG( 7, 0, EBX, 16, 1, AVX512F, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 17, 1, AVX512DQ, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 18, 1, RDSEED, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 19, 1, ADX, YES, 11, TRUE) \ FLAG( 7, 0, EBX, 20, 1, SMAP, YES, 11, FALSE) \ FLAG( 7, 0, EBX, 21, 1, AVX512IFMA, YES, 16, TRUE) \ FLAG( 7, 0, EBX, 23, 1, CLFLUSHOPT, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 24, 1, CLWB, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 25, 1, PT, YES, 16, FALSE) \ FLAG( 7, 0, EBX, 26, 1, AVX512PF, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 27, 1, AVX512ER, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 28, 1, AVX512CD, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 29, 1, SHA, YES, 14, TRUE) \ FLAG( 7, 0, EBX, 30, 1, AVX512BW, YES, 13, TRUE) \ FLAG( 7, 0, EBX, 31, 1, AVX512VL, YES, 13, TRUE) \ FLAG( 7, 0, ECX, 0, 1, PREFETCHWT1, YES, 13, TRUE) \ FLAG( 7, 0, ECX, 1, 1, AVX512VBMI, YES, 16, TRUE) \ FLAG( 7, 0, ECX, 2, 1, UMIP, NO, 0, FALSE) \ FLAG( 7, 0, ECX, 3, 1, PKU, YES, 13, TRUE) \ FLAG( 7, 0, ECX, 4, 1, OSPKE, ANY, 13, TRUE) \ FLAG( 7, 0, ECX, 6, 1, AVX512VBMI2, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 8, 1, GFNI, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 9, 1, VAES, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 10, 1, VPCLMULQDQ, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 11, 1, AVX512VNNI, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 12, 1, AVX512BITALG, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 14, 1, AVX512VPOPCNTDQ, YES, 15, TRUE) \ FLAG( 7, 0, ECX, 16, 1, VA57, NO, 0, TRUE) \ FIELD( 7, 0, ECX, 17, 5, MAWA, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 22, 1, RDPID, NO, 0, TRUE) \ FLAG( 7, 0, ECX, 30, 1, SGX_LC, YES, 16, FALSE) \ FLAG( 7, 
0, EDX, 2, 1, AVX512QVNNIW, YES, 15, TRUE) \ FLAG( 7, 0, EDX, 3, 1, AVX512QFMAPS, YES, 15, TRUE) \ FLAG( 7, 0, EDX, 26, 1, IBRSIBPB, ANY, 9, FALSE) \ FLAG( 7, 0, EDX, 27, 1, STIBP, YES, 9, FALSE) \ CPUID_7_EDX_28 \ FLAG( 7, 0, EDX, 29, 1, ARCH_CAPABILITIES, ANY, 9, FALSE) \ CPUID_7_EDX_31 /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_A \ FIELD( A, 0, EAX, 0, 8, PMC_VERSION, NA, 0, FALSE) \ FIELD( A, 0, EAX, 8, 8, PMC_NUM_GEN, NA, 0, FALSE) \ FIELD( A, 0, EAX, 16, 8, PMC_WIDTH_GEN, NA, 0, FALSE) \ FIELD( A, 0, EAX, 24, 8, PMC_EBX_LENGTH, NA, 0, FALSE) \ FLAG( A, 0, EBX, 0, 1, PMC_CORE_CYCLES, NA, 0, FALSE) \ FLAG( A, 0, EBX, 1, 1, PMC_INSTR_RETIRED, NA, 0, FALSE) \ FLAG( A, 0, EBX, 2, 1, PMC_REF_CYCLES, NA, 0, FALSE) \ FLAG( A, 0, EBX, 3, 1, PMC_LAST_LVL_CREF, NA, 0, FALSE) \ FLAG( A, 0, EBX, 4, 1, PMC_LAST_LVL_CMISS, NA, 0, FALSE) \ FLAG( A, 0, EBX, 5, 1, PMC_BR_INST_RETIRED, NA, 0, FALSE) \ FLAG( A, 0, EBX, 6, 1, PMC_BR_MISS_RETIRED, NA, 0, FALSE) \ FIELD( A, 0, EDX, 0, 5, PMC_NUM_FIXED, NA, 0, FALSE) \ FIELD( A, 0, EDX, 5, 8, PMC_WIDTH_FIXED, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_B \ FIELD( B, 0, EAX, 0, 5, TOPOLOGY_MASK_WIDTH, NA, 0, FALSE) \ FIELD( B, 0, EBX, 0, 16, TOPOLOGY_CPUS_SHARING_LEVEL, NA, 0, FALSE) \ FIELD( B, 0, ECX, 0, 8, TOPOLOGY_LEVEL_NUMBER, NA, 0, FALSE) \ FIELD( B, 0, ECX, 8, 8, TOPOLOGY_LEVEL_TYPE, NA, 0, FALSE) \ FIELD( B, 0, EDX, 0, 32, TOPOLOGY_X2APIC_ID, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_D \ FLAG( D, 0, EAX, 0, 1, XCR0_MASTER_LEGACY_FP, YES, 8, FALSE) \ FLAG( D, 0, EAX, 1, 1, XCR0_MASTER_SSE, YES, 8, FALSE) \ FLAG( D, 0, EAX, 2, 1, XCR0_MASTER_YMM_H, YES, 8, FALSE) \ FLAG( D, 0, EAX, 3, 1, XCR0_MASTER_BNDREGS, YES, 13, FALSE) \ FLAG( D, 0, EAX, 4, 1, XCR0_MASTER_BNDCSR, YES, 13, FALSE) \ FLAG( D, 0, EAX, 5, 1, XCR0_MASTER_OPMASK, YES, 13, 
FALSE) \ FLAG( D, 0, EAX, 6, 1, XCR0_MASTER_ZMM_H, YES, 13, FALSE) \ FLAG( D, 0, EAX, 7, 1, XCR0_MASTER_HI16_ZMM, YES, 13, FALSE) \ FLAG( D, 0, EAX, 8, 1, XCR0_MASTER_XSS, NO, 0, FALSE) \ FLAG( D, 0, EAX, 9, 1, XCR0_MASTER_PKRU, YES, 13, FALSE) \ FIELD( D, 0, EAX, 10,22, XCR0_MASTER_LOWER, NO, 0, FALSE) \ FIELD( D, 0, EBX, 0, 32, XSAVE_ENABLED_SIZE, ANY, 8, FALSE) \ FIELD( D, 0, ECX, 0, 32, XSAVE_MAX_SIZE, YES, 8, FALSE) \ FIELD( D, 0, EDX, 0, 29, XCR0_MASTER_UPPER, NO, 0, FALSE) \ FLAG( D, 0, EDX, 30, 1, XCR0_MASTER_LWP, NO, 0, FALSE) \ FLAG( D, 0, EDX, 31, 1, XCR0_MASTER_EXTENDED_XSAVE, NO, 0, FALSE) \ FLAG( D, 1, EAX, 0, 1, XSAVEOPT, YES, 11, FALSE) \ FLAG( D, 1, EAX, 1, 1, XSAVEC, YES, 13, FALSE) \ FLAG( D, 1, EAX, 2, 1, XGETBV_ECX1, NO, 0, FALSE) \ FLAG( D, 1, EAX, 3, 1, XSAVES, YES, 13, FALSE) \ FIELD( D, 1, EBX, 0, 32, XSAVES_ENABLED_SIZE, ANY, 13, FALSE) \ FIELD( D, 1, ECX, 0, 7, XSS_XCR0_USED0, NO, 0, FALSE) \ FLAG( D, 1, ECX, 8, 1, XSS_PT, NO, 0, FALSE) \ FIELD( D, 1, ECX, 9, 1, XSS_XCR0_USED1, NO, 0, FALSE) \ FIELD( D, 1, ECX, 10,22, XSS_RSVD0, NO, 0, FALSE) \ FIELD( D, 1, EDX, 0, 32, XSS_RSVD1, NO, 0, FALSE) \ FIELD( D, 2, EAX, 0, 32, XSAVE_YMM_SIZE, YES, 9, FALSE) \ FIELD( D, 2, EBX, 0, 32, XSAVE_YMM_OFFSET, YES, 9, FALSE) \ FLAG( D, 2, ECX, 0, 1, XSAVE_YMM_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 2, ECX, 1, 1, XSAVE_YMM_ALIGN, YES, 13, FALSE) \ FIELD( D, 2, ECX, 2, 30, XSAVE_YMM_RSVD1, NO, 0, FALSE) \ FIELD( D, 2, EDX, 0, 32, XSAVE_YMM_RSVD2, NO, 0, FALSE) \ FIELD( D, 3, EAX, 0, 32, XSAVE_BNDREGS_SIZE, YES, 13, FALSE) \ FIELD( D, 3, EBX, 0, 32, XSAVE_BNDREGS_OFFSET, YES, 13, FALSE) \ FLAG( D, 3, ECX, 0, 1, XSAVE_BNDREGS_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 3, ECX, 1, 1, XSAVE_BNDREGS_ALIGN, YES, 13, FALSE) \ FIELD( D, 3, ECX, 2, 30, XSAVE_BNDREGS_RSVD1, NO, 0, FALSE) \ FIELD( D, 3, EDX, 0, 32, XSAVE_BNDREGS_RSVD2, NO, 0, FALSE) \ FIELD( D, 4, EAX, 0, 32, XSAVE_BNDCSR_SIZE, YES, 13, FALSE) \ FIELD( D, 4, EBX, 0, 32, XSAVE_BNDCSR_OFFSET, YES, 13, FALSE) \ 
FLAG( D, 4, ECX, 0, 1, XSAVE_BNDCSR_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 4, ECX, 1, 1, XSAVE_BNDCSR_ALIGN, YES, 13, FALSE) \ FIELD( D, 4, ECX, 2, 30, XSAVE_BNDCSR_RSVD1, NO, 0, FALSE) \ FIELD( D, 4, EDX, 0, 32, XSAVE_BNDCSR_RSVD2, NO, 0, FALSE) \ FIELD( D, 5, EAX, 0, 32, XSAVE_OPMASK_SIZE, YES, 13, FALSE) \ FIELD( D, 5, EBX, 0, 32, XSAVE_OPMASK_OFFSET, YES, 13, FALSE) \ FLAG( D, 5, ECX, 0, 1, XSAVE_OPMASK_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 5, ECX, 1, 1, XSAVE_OPMASK_ALIGN, YES, 13, FALSE) \ FIELD( D, 5, ECX, 2, 30, XSAVE_OPMASK_RSVD1, NO, 0, FALSE) \ FIELD( D, 5, EDX, 0, 32, XSAVE_OPMASK_RSVD2, NO, 0, FALSE) \ FIELD( D, 6, EAX, 0, 32, XSAVE_ZMM_H_SIZE, YES, 13, FALSE) \ FIELD( D, 6, EBX, 0, 32, XSAVE_ZMM_H_OFFSET, YES, 13, FALSE) \ FLAG( D, 6, ECX, 0, 1, XSAVE_ZMM_H_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 6, ECX, 1, 1, XSAVE_ZMM_H_ALIGN, YES, 13, FALSE) \ FIELD( D, 6, ECX, 2, 30, XSAVE_ZMM_H_RSVD1, NO, 0, FALSE) \ FIELD( D, 6, EDX, 0, 32, XSAVE_ZMM_H_RSVD2, NO, 0, FALSE) \ FIELD( D, 7, EAX, 0, 32, XSAVE_HI16_ZMM_SIZE, YES, 13, FALSE) \ FIELD( D, 7, EBX, 0, 32, XSAVE_HI16_ZMM_OFFSET, YES, 13, FALSE) \ FLAG( D, 7, ECX, 0, 1, XSAVE_HI16_ZMM_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 7, ECX, 1, 1, XSAVE_HI16_ZMM_ALIGN, YES, 13, FALSE) \ FIELD( D, 7, ECX, 2, 30, XSAVE_HI16_ZMM_RSVD1, NO, 0, FALSE) \ FIELD( D, 7, EDX, 0, 32, XSAVE_HI16_ZMM_RSVD2, NO, 0, FALSE) \ FIELD( D, 8, EAX, 0, 32, XSAVE_PT_STATE_SIZE, NO, 0, FALSE) \ FIELD( D, 8, EBX, 0, 32, XSAVE_PT_STATE_OFFSET, NO, 0, FALSE) \ FLAG( D, 8, ECX, 0, 1, XSAVE_PT_STATE_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 8, ECX, 1, 1, XSAVE_PT_STATE_ALIGN, NO, 0, FALSE) \ FIELD( D, 8, ECX, 2, 30, XSAVE_PT_STATE_RSVD1, NO, 0, FALSE) \ FIELD( D, 8, EDX, 0, 32, XSAVE_PT_STATE_RSVD2, NO, 0, FALSE) \ FIELD( D, 9, EAX, 0, 32, XSAVE_PKRU_SIZE, YES, 13, FALSE) \ FIELD( D, 9, EBX, 0, 32, XSAVE_PKRU_OFFSET, YES, 13, FALSE) \ FLAG( D, 9, ECX, 0, 1, XSAVE_PKRU_SUP_BY_XSS, NO, 0, FALSE) \ FLAG( D, 9, ECX, 1, 1, XSAVE_PKRU_ALIGN, YES, 13, FALSE) \ 
FIELD( D, 9, ECX, 2, 30, XSAVE_PKRU_RSVD1, NO, 0, FALSE) \ FIELD( D, 9, EDX, 0, 32, XSAVE_PKRU_RSVD2, NO, 0, FALSE) \ FIELD( D, 62, EAX, 0, 32, XSAVE_LWP_SIZE, NO, 0, FALSE) \ FIELD( D, 62, EBX, 0, 32, XSAVE_LWP_OFFSET, NO, 0, FALSE) \ FIELD( D, 62, ECX, 0, 32, XSAVE_LWP_RSVD1, NO, 0, FALSE) \ FIELD( D, 62, EDX, 0, 32, XSAVE_LWP_RSVD2, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_F \ FIELD( F, 0, EBX, 0, 32, PQM_MAX_RMID, NO, 0, FALSE) \ FLAG( F, 0, EDX, 1, 1, PQM_CMT_SUPPORT, NO, 0, FALSE) \ FIELD( F, 1, EBX, 0, 32, PQM_CMT_CONV, NO, 0, FALSE) \ FIELD( F, 1, ECX, 0, 32, PQM_CMT_NUM_RMID, NO, 0, FALSE) \ FLAG( F, 1, EDX, 0, 1, PQM_CMT_OCCUPANCY, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_10 \ FLAG( 10, 0, EBX, 1, 1, L3_QOS_ENFORCEMENT, NO, 0, FALSE) \ FIELD( 10, 1, EAX, 0, 4, RESID_CAPACITY_MASK_LENGTH, NO, 0, FALSE) \ FIELD( 10, 1, EBX, 0, 32, ISOLATION_UNIT_MAP, NO, 0, FALSE) \ FLAG( 10, 1, ECX, 1, 1, INFREQUENT_COS_UPDATE, NO, 0, FALSE) \ FLAG( 10, 1, ECX, 2, 1, CODE_AND_DATA_PRIORITIZATION, NO, 0, FALSE) \ FIELD( 10, 1, EDX, 0, 16, MAX_COS_NUMBER, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_12 \ FLAG( 12, 0, EAX, 0, 1, SGX1, YES, 16, FALSE) \ FLAG( 12, 0, EAX, 1, 1, SGX2, NO, 0, FALSE) \ FLAG( 12, 0, EBX, 0, 1, SGX_MISCSELECT_EXINFO, YES, 16, FALSE) \ FIELD( 12, 0, EBX, 1, 31, SGX_MISCSELECT_RSVD, NO, 0, FALSE) \ FIELD( 12, 0, EDX, 0, 8, MAX_ENCLAVE_SIZE_NOT64, YES, 16, FALSE) \ FIELD( 12, 0, EDX, 8, 8, MAX_ENCLAVE_SIZE_64, YES, 16, FALSE) \ FIELD( 12, 1, EAX, 0, 32, SECS_ATTRIBUTES0, YES, 16, FALSE) \ FIELD( 12, 1, EBX, 0, 32, SECS_ATTRIBUTES1, YES, 16, FALSE) \ FIELD( 12, 1, ECX, 0, 32, SECS_ATTRIBUTES2, YES, 16, FALSE) \ FIELD( 12, 1, EDX, 0, 32, SECS_ATTRIBUTES3, YES, 16, FALSE) \ FIELD( 12, 2, EAX, 0, 15, EPC00_VALID, YES, 16, FALSE) \ FIELD( 12, 2, EAX, 12, 
20, EPC00_BASE_LOW, YES, 16, FALSE) \ FIELD( 12, 2, EBX, 0, 20, EPC00_BASE_HIGH, YES, 16, FALSE) \ FIELD( 12, 2, ECX, 0, 15, EPC00_PROTECTED, YES, 16, FALSE) \ FIELD( 12, 2, ECX, 12, 20, EPC00_SIZE_LOW, YES, 16, FALSE) \ FIELD( 12, 2, EDX, 0, 20, EPC00_SIZE_HIGH, YES, 16, FALSE) \ FIELD( 12, 3, EAX, 0, 4, EPC01_VALID, NO, 0, FALSE) \ FIELD( 12, 3, EAX, 12, 20, EPC01_BASE_LOW, NO, 0, FALSE) \ FIELD( 12, 3, EBX, 0, 20, EPC01_BASE_HIGH, NO, 0, FALSE) \ FIELD( 12, 3, ECX, 0, 4, EPC01_PROTECTED, NO, 0, FALSE) \ FIELD( 12, 3, ECX, 12, 20, EPC01_SIZE_LOW, NO, 0, FALSE) \ FIELD( 12, 3, EDX, 0, 20, EPC01_SIZE_HIGH, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_14 \ FIELD( 14, 0, EAX, 0, 32, MAX_PT_SUB_LEAF, YES, 16, FALSE) \ FLAG( 14, 0, EBX, 0, 1, CR3FTR_AND_MATCHMSR_AVAILABLE, YES, 16, FALSE) \ FLAG( 14, 0, EBX, 1, 1, PSB_AND_CYCLE_ACCURATE_MODE, YES, 16, FALSE) \ FLAG( 14, 0, EBX, 2, 1, IP_TRACESTOP_FTR_PTMSR_PERSIST, YES, 16, FALSE) \ FLAG( 14, 0, EBX, 3, 1, MTC_PKT_GENERATION_SUPPORTED, YES, 16, FALSE) \ FLAG( 14, 0, ECX, 0, 1, TOPA_OUTPUT_SUPPORTED, YES, 16, FALSE) \ FLAG( 14, 0, ECX, 1, 1, TOPA_ALLOW_MULTIPLE_ENTRIES, YES, 16, FALSE) \ FLAG( 14, 0, ECX, 2, 1, SINGLE_RANGE_OUTPUT_SCHEME, YES, 16, FALSE) \ FLAG( 14, 0, ECX, 3, 1, TRACE_TRANSPORT_SUBSYSTEM, NO, 0, FALSE) \ FLAG( 14, 0, ECX, 31, 1, LIP_PRESENT_FOR_IP_PAYLOADS, YES, 16, FALSE) \ FIELD( 14, 1, EAX, 0, 2, NUM_ADDR_RANGE_FOR_FILTERING, YES, 16, FALSE) \ FIELD( 14, 1, EAX, 16, 16, SUPPORTED_MTC_ENCODINGS, YES, 16, FALSE) \ FIELD( 14, 1, EBX, 0, 16, SUPPORTED_CYCLE_THRESHOLD_ENCODINGS, YES,16,FALSE) \ FIELD( 14, 1, EBX, 16, 16, SUPPORTED_PSB_FREQ_ENCODINGS, YES, 16, FALSE) \ /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_15 \ FIELD( 15, 0, EAX, 0, 32, DENOM_TSC_TO_CORE_CRYSTAL_CLK, NO, 0, FALSE) \ FIELD( 15, 0, EBX, 0, 32, NUMER_TSC_TO_CORE_CRYSTAL_CLK, NO, 0, FALSE) \ /* LEVEL, SUB-LEVEL, REG, 
POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_16 \ FIELD( 16, 0, EAX, 0, 16, PROC_BASE_FREQ, NO, 0, FALSE) \ FIELD( 16, 0, EBX, 0, 16, PROC_MIN_FREQ, NO, 0, FALSE) \ FIELD( 16, 0, ECX, 0, 16, BUS_FREQ, NO, 0, FALSE) \ /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_17 \ FIELD( 17, 0, EAX, 0, 31, MAX_SOCID_INDEX, NO, 0, FALSE) \ FIELD( 17, 0, EBX, 0, 16, SOC_VENDOR_ID, NO, 0, FALSE) \ FIELD( 17, 0, EBX, 16, 1, SOC_INDUSTRY_STD, NO, 0, FALSE) \ FIELD( 17, 0, ECX, 0, 31, SOC_PROJECT_ID, NO, 0, FALSE) \ FIELD( 17, 0, EDX, 0, 31, SOC_STEPPING_ID, NO, 0, FALSE) \ FIELD( 17, 1, EAX, 0, 32, SOC_VENDOR_BRAND_STRING_1_0, NO, 0, FALSE) \ FIELD( 17, 1, EBX, 0, 32, SOC_VENDOR_BRAND_STRING_1_1, NO, 0, FALSE) \ FIELD( 17, 1, ECX, 0, 32, SOC_VENDOR_BRAND_STRING_1_2, NO, 0, FALSE) \ FIELD( 17, 1, EDX, 0, 32, SOC_VENDOR_BRAND_STRING_1_3, NO, 0, FALSE) \ FIELD( 17, 2, EAX, 0, 32, SOC_VENDOR_BRAND_STRING_2_0, NO, 0, FALSE) \ FIELD( 17, 2, EBX, 0, 32, SOC_VENDOR_BRAND_STRING_2_1, NO, 0, FALSE) \ FIELD( 17, 2, ECX, 0, 32, SOC_VENDOR_BRAND_STRING_2_2, NO, 0, FALSE) \ FIELD( 17, 2, EDX, 0, 32, SOC_VENDOR_BRAND_STRING_2_3, NO, 0, FALSE) \ FIELD( 17, 3, EAX, 0, 32, SOC_VENDOR_BRAND_STRING_3_0, NO, 0, FALSE) \ FIELD( 17, 3, EBX, 0, 32, SOC_VENDOR_BRAND_STRING_3_1, NO, 0, FALSE) \ FIELD( 17, 3, ECX, 0, 32, SOC_VENDOR_BRAND_STRING_3_2, NO, 0, FALSE) \ FIELD( 17, 3, EDX, 0, 32, SOC_VENDOR_BRAND_STRING_3_3, NO, 0, FALSE) \ /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_400 \ FIELD(400, 0, EAX, 0, 32, MAX_HYP_LEVEL, NA, 0, FALSE) \ FIELD(400, 0, EBX, 0, 32, HYPERVISOR_VENDOR0, NA, 0, FALSE) \ FIELD(400, 0, ECX, 0, 32, HYPERVISOR_VENDOR1, NA, 0, FALSE) \ FIELD(400, 0, EDX, 0, 32, HYPERVISOR_VENDOR2, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_401 \ FIELD(401, 0, EAX, 0, 32, HV_INTERFACE_SIGNATURE, NA, 0, 
FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_402 \ FIELD(402, 0, EAX, 0, 32, BUILD_NUMBER, NA, 0, FALSE) \ FIELD(402, 0, EBX, 0, 16, MINOR_VERSION, NA, 0, FALSE) \ FIELD(402, 0, EBX, 16, 16, MAJOR_VERSION, NA, 0, FALSE) \ FIELD(402, 0, ECX, 0, 32, SERVICE_PACK, NA, 0, FALSE) \ FIELD(402, 0, EDX, 0, 24, SERVICE_NUMBER, NA, 0, FALSE) \ FIELD(402, 0, EDX, 24, 8, SERVICE_BRANCH, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_403 \ FLAG( 403, 0, EAX, 0, 1, VP_RUNTIME_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 1, 1, REF_COUNTER_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 2, 1, BASIC_SYNIC_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 3, 1, SYNTH_TIMER_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 4, 1, APIC_ACCESS_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 5, 1, HYPERCALL_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 6, 1, VP_INDEX_MSR_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 7, 1, VIRT_RESET_MSR_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 8, 1, STATS_PAGES_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 9, 1, REF_TSC_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 10, 1, GUEST_IDLE_MSR_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 11, 1, FREQUENCY_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EAX, 12, 1, SYNTH_DEBUG_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 0, 1, CREATE_PARTITIONS_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 1, 1, ACCESS_PARTITION_ID_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 2, 1, ACCESS_MEMORY_POOL_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 3, 1, ADJUST_MESSAGE_BUFFERS_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 4, 1, POST_MESSAGES_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 5, 1, SIGNAL_EVENTS_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 6, 1, CREATE_PORT_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 7, 1, CONNECT_PORT_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 8, 1, ACCESS_STATS_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 11, 1, DEBUGGING_FLAG, NA, 0, FALSE) \ 
FLAG( 403, 0, EBX, 12, 1, CPU_MANAGEMENT_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 13, 1, CONFIGURE_PROFILER_FLAG, NA, 0, FALSE) \ FLAG( 403, 0, EBX, 14, 1, ENABLE_EXPANDED_STACKWALKING_FLAG, NA, 0, FALSE) \ FIELD(403, 0, ECX, 0, 4, MAX_POWER_STATE, NA, 0, FALSE) \ FLAG( 403, 0, ECX, 4, 1, HPET_NEEDED_FOR_C3, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 0, 1, MWAIT_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 1, 1, GUEST_DEBUGGING_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 2, 1, PERFORMANCE_MONITOR_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 3, 1, CPU_DYN_PARTITIONING_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 4, 1, XMM_REGISTERS_FOR_HYPERCALL_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 5, 1, GUEST_IDLE_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 6, 1, HYPERVISOR_SLEEP_STATE_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 7, 1, NUMA_DISTANCE_QUERY_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 8, 1, TIMER_FREQUENCY_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 9, 1, SYNTH_MACHINE_CHECK_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 10, 1, GUEST_CRASH_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 11, 1, DEBUG_MSRS_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 12, 1, NPIEP1_AVAIL, NA, 0, FALSE) \ FLAG( 403, 0, EDX, 13, 1, DISABLE_HYPERVISOR_AVAIL, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_404 \ FLAG( 404, 0, EAX, 0, 1, USE_HYPERCALL_TO_SWITCH_ADDR_SPACE,NA, 0, FALSE) \ FLAG( 404, 0, EAX, 1, 1, USE_HYPERCALL_TO_FLUSH_TLB, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 2, 1, USE_HYPERCALL_FOR_TLB_SHOOTDOWN, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 3, 1, USE_MSRS_FOR_EOI_ICR_TPR, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 4, 1, USE_MSR_FOR_RESET, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 5, 1, USE_RELAXED_TIMING, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 6, 1, USE_DMA_REMAPPING, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 7, 1, USE_INTERRUPT_REMAPPING, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 8, 1, USE_X2APIC, NA, 0, FALSE) \ FLAG( 404, 0, EAX, 9, 1, DEPRECATE_AUTOEOI, NA, 0, FALSE) \ FIELD(404, 0, 
EBX, 0, 32, SPINLOCK_RETRIES, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_405 \ FIELD(405, 0, EAX, 0, 32, MAX_VCPU, NA, 0, FALSE) \ FIELD(405, 0, EBX, 0, 32, MAX_LCPU, NA, 0, FALSE) \ FIELD(405, 0, ECX, 0, 32, MAX_REMAPPABLE_VECTORS, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_406 \ FLAG( 406, 0, EAX, 0, 1, APIC_OVERLAY_ASSIST, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 1, 1, MSR_BITMAPS, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 2, 1, ARCH_PMCS, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 3, 1, SLAT, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 4, 1, DMA_REMAPPING, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 5, 1, INTERRUPT_REMAPPING, NA, 0, FALSE) \ FLAG( 406, 0, EAX, 6, 1, MEMORY_PATROL_SCRUBBER, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_410 \ FIELD(410, 0, EAX, 0, 32, TSC_HZ, NA, 0, FALSE) \ FIELD(410, 0, EBX, 0, 32, APICBUS_HZ, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_80 \ FIELD( 80, 0, EAX, 0, 32, NUM_EXT_LEVELS, NA, 0, FALSE) \ FIELD( 80, 0, EBX, 0, 32, LEAF80_VENDOR1, NA, 0, FALSE) \ FIELD( 80, 0, ECX, 0, 32, LEAF80_VENDOR3, NA, 0, FALSE) \ FIELD( 80, 0, EDX, 0, 32, LEAF80_VENDOR2, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81 \ FIELD( 81, 0, EAX, 0, 32, UNKNOWN81EAX, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 0, 4, LEAF81_STEPPING, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 4, 4, LEAF81_MODEL, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 8, 4, LEAF81_FAMILY, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 12, 2, LEAF81_TYPE, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 16, 4, LEAF81_EXTENDED_MODEL, ANY, 4, FALSE) \ FIELD( 81, 0, EAX, 20, 8, LEAF81_EXTENDED_FAMILY, ANY, 4, FALSE) \ FIELD( 81, 0, EBX, 0, 32, UNKNOWN81EBX, ANY, 4, FALSE) \ FIELD( 81, 0, EBX, 0, 16, LEAF81_BRAND_ID, ANY, 4, FALSE) \ FIELD( 81, 
0, EBX, 16, 16, UNDEF, ANY, 4, FALSE) \ FLAG( 81, 0, ECX, 0, 1, LAHF64, YES, 4, TRUE) \ FLAG( 81, 0, ECX, 1, 1, CMPLEGACY, ANY, 9, FALSE) \ FLAG( 81, 0, ECX, 2, 1, SVM, YES, 7, FALSE) \ FLAG( 81, 0, ECX, 3, 1, EXTAPICSPC, YES, 4, FALSE) \ FLAG( 81, 0, ECX, 4, 1, CR8AVAIL, YES, 4, FALSE) \ FLAG( 81, 0, ECX, 5, 1, ABM, YES, 7, TRUE) \ FLAG( 81, 0, ECX, 6, 1, SSE4A, YES, 4, TRUE) \ FLAG( 81, 0, ECX, 7, 1, MISALIGNED_SSE, YES, 4, TRUE) \ FLAG( 81, 0, ECX, 8, 1, 3DNPREFETCH, YES, 4, TRUE) \ FLAG( 81, 0, ECX, 9, 1, OSVW, ANY, 8, FALSE) \ FLAG( 81, 0, ECX, 10, 1, IBS, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 11, 1, XOP, YES, 8, TRUE) \ FLAG( 81, 0, ECX, 12, 1, SKINIT, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 13, 1, WATCHDOG, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 15, 1, LWP, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 16, 1, FMA4, YES, 8, TRUE) \ FLAG( 81, 0, ECX, 17, 1, TCE, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 19, 1, NODEID_MSR, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 21, 1, TBM, YES, 9, TRUE) \ FLAG( 81, 0, ECX, 22, 1, TOPOLOGY, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 23, 1, PERFCORE, ANY, 4, TRUE) \ FLAG( 81, 0, ECX, 24, 1, PERFNB, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 26, 1, DATABK, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 27, 1, PERFTSC, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 28, 1, PERFL3, NO, 0, FALSE) \ FLAG( 81, 0, ECX, 29, 1, MWAITX, NO, 0, FALSE) \ FLAG( 81, 0, EDX, 0, 1, LEAF81_FPU, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 1, 1, LEAF81_VME, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 2, 1, LEAF81_DE, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 3, 1, LEAF81_PSE, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 4, 1, LEAF81_TSC, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 5, 1, LEAF81_MSR, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 6, 1, LEAF81_PAE, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 7, 1, LEAF81_MCE, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 8, 1, LEAF81_CX8, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 9, 1, LEAF81_APIC, ANY, 4, FALSE) \ FLAG( 81, 0, EDX, 11, 1, SYSC, ANY, 4, TRUE) \ FLAG( 81, 0, EDX, 12, 1, LEAF81_MTRR, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 13, 1, LEAF81_PGE, YES, 4, FALSE) \ 
FLAG( 81, 0, EDX, 14, 1, LEAF81_MCA, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 15, 1, LEAF81_CMOV, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 16, 1, LEAF81_PAT, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 17, 1, LEAF81_PSE36, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 20, 1, NX, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 22, 1, MMXEXT, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 23, 1, LEAF81_MMX, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 24, 1, LEAF81_FXSR, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 25, 1, FFXSR, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 26, 1, PDPE1GB, YES, 7, FALSE) \ FLAG( 81, 0, EDX, 27, 1, RDTSCP, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 29, 1, LM, YES, 4, FALSE) \ FLAG( 81, 0, EDX, 30, 1, 3DNOWPLUS, YES, 4, TRUE) \ FLAG( 81, 0, EDX, 31, 1, 3DNOW, YES, 4, TRUE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_82 \ FIELD( 82, 0, EAX, 0, 32, LEAF82_BRAND_STRING_EAX, NA, 0, FALSE) \ FIELD( 82, 0, EBX, 0, 32, LEAF82_BRAND_STRING_EBX, NA, 0, FALSE) \ FIELD( 82, 0, ECX, 0, 32, LEAF82_BRAND_STRING_ECX, NA, 0, FALSE) \ FIELD( 82, 0, EDX, 0, 32, LEAF82_BRAND_STRING_EDX, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_83 \ FIELD( 83, 0, EAX, 0, 32, LEAF83_BRAND_STRING_EAX, NA, 0, FALSE) \ FIELD( 83, 0, EBX, 0, 32, LEAF83_BRAND_STRING_EBX, NA, 0, FALSE) \ FIELD( 83, 0, ECX, 0, 32, LEAF83_BRAND_STRING_ECX, NA, 0, FALSE) \ FIELD( 83, 0, EDX, 0, 32, LEAF83_BRAND_STRING_EDX, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_84 \ FIELD( 84, 0, EAX, 0, 32, LEAF84_BRAND_STRING_EAX, NA, 0, FALSE) \ FIELD( 84, 0, EBX, 0, 32, LEAF84_BRAND_STRING_EBX, NA, 0, FALSE) \ FIELD( 84, 0, ECX, 0, 32, LEAF84_BRAND_STRING_ECX, NA, 0, FALSE) \ FIELD( 84, 0, EDX, 0, 32, LEAF84_BRAND_STRING_EDX, NA, 0, FALSE) /* LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_85 \ FIELD( 85, 0, EAX, 0, 8, ITLB_ENTRIES_2M4M_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EAX, 8, 8, 
ITLB_ASSOC_2M4M_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EAX, 16, 8, DTLB_ENTRIES_2M4M_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EAX, 24, 8, DTLB_ASSOC_2M4M_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EBX, 0, 8, ITLB_ENTRIES_4K_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EBX, 8, 8, ITLB_ASSOC_4K_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EBX, 16, 8, DTLB_ENTRIES_4K_PGS, NA, 0, FALSE) \ FIELD( 85, 0, EBX, 24, 8, DTLB_ASSOC_4K_PGS, NA, 0, FALSE) \ FIELD( 85, 0, ECX, 0, 8, L1_DCACHE_LINE_SIZE, NA, 0, FALSE) \ FIELD( 85, 0, ECX, 8, 8, L1_DCACHE_LINES_PER_TAG, NA, 0, FALSE) \ FIELD( 85, 0, ECX, 16, 8, L1_DCACHE_ASSOC, NA, 0, FALSE) \ FIELD( 85, 0, ECX, 24, 8, L1_DCACHE_SIZE, NA, 0, FALSE) \ FIELD( 85, 0, EDX, 0, 8, L1_ICACHE_LINE_SIZE, NA, 0, FALSE) \ FIELD( 85, 0, EDX, 8, 8, L1_ICACHE_LINES_PER_TAG, NA, 0, FALSE) \ FIELD( 85, 0, EDX, 16, 8, L1_ICACHE_ASSOC, NA, 0, FALSE) \ FIELD( 85, 0, EDX, 24, 8, L1_ICACHE_SIZE, NA, 0, FALSE) /* LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_86 \ FIELD( 86, 0, EAX, 0, 12, L2_ITLB_ENTRIES_2M4M_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EAX, 12, 4, L2_ITLB_ASSOC_2M4M_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EAX, 16, 12, L2_DTLB_ENTRIES_2M4M_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EAX, 28, 4, L2_DTLB_ASSOC_2M4M_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EBX, 0, 12, L2_ITLB_ENTRIES_4K_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EBX, 12, 4, L2_ITLB_ASSOC_4K_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EBX, 16, 12, L2_DTLB_ENTRIES_4K_PGS, NA, 0, FALSE) \ FIELD( 86, 0, EBX, 28, 4, L2_DTLB_ASSOC_4K_PGS, NA, 0, FALSE) \ FIELD( 86, 0, ECX, 0, 8, L2CACHE_LINE, NA, 0, FALSE) \ FIELD( 86, 0, ECX, 8, 4, L2CACHE_LINE_PER_TAG, NA, 0, FALSE) \ FIELD( 86, 0, ECX, 12, 4, L2CACHE_WAYS, NA, 0, FALSE) \ FIELD( 86, 0, ECX, 16, 16, L2CACHE_SIZE, NA, 0, FALSE) \ FIELD( 86, 0, EDX, 0, 8, L3CACHE_LINE, NA, 0, FALSE) \ FIELD( 86, 0, EDX, 8, 4, L3CACHE_LINE_PER_TAG, NA, 0, FALSE) \ FIELD( 86, 0, EDX, 12, 4, L3CACHE_WAYS, NA, 0, FALSE) \ FIELD( 86, 0, EDX, 18, 14, L3CACHE_SIZE, NA, 0, FALSE) /* LEVEL, REG, POS, 
SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_87 \ FLAG( 87, 0, EBX, 0, 1, MCA_OVERFLOW_RECOV, NA, 0, FALSE) \ FLAG( 87, 0, EBX, 1, 1, SUCCOR, NA, 0, FALSE) \ FLAG( 87, 0, EBX, 2, 1, HWA, NA, 0, FALSE) \ FLAG( 87, 0, EBX, 3, 1, SCALABLE_MCA, NA, 0, FALSE) \ FLAG( 87, 0, EBX, 4, 1, PFEH_SUPPORT_PRESENT, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 0, 1, TS, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 1, 1, FID, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 2, 1, VID, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 3, 1, TTP, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 4, 1, LEAF87_TM, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 5, 1, STC, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 6, 1, 100MHZSTEPS, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 7, 1, HWPSTATE, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 8, 1, TSC_INVARIANT, NA, 0, FALSE) \ FLAG( 87, 0, EDX, 9, 1, CORE_PERF_BOOST, NA, 0, FALSE) #define CPUID_88_EBX_14 \ FLAG( 88, 0, EBX, 14, 1, LEAF88_RSVD1, NO, 0, FALSE) #define CPUID_88_EBX_15 \ FLAG( 88, 0, EBX, 15, 1, LEAF88_RSVD2, NO, 0, FALSE) #define CPUID_88_EBX_16 \ FLAG( 88, 0, EBX, 16, 1, LEAF88_RSVD3, NO, 0, FALSE) #define CPUID_88_EBX_17 \ FLAG( 88, 0, EBX, 17, 1, LEAF88_RSVD4, NO, 0, FALSE) #define CPUID_88_EBX_18 \ FLAG( 88, 0, EBX, 18, 1, LEAF88_RSVD5, NO, 0, FALSE) /* LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_88 \ FIELD( 88, 0, EAX, 0, 8, PHYS_BITS, YES, 4, FALSE) \ FIELD( 88, 0, EAX, 8, 8, VIRT_BITS, YES, 4, FALSE) \ FIELD( 88, 0, EAX, 16, 8, GUEST_PHYS_ADDR_SZ, YES, 8, FALSE) \ FLAG( 88, 0, EBX, 0, 1, CLZERO, YES, 14, TRUE) \ FLAG( 88, 0, EBX, 1, 1, IRPERF, NO, 0, FALSE) \ FLAG( 88, 0, EBX, 2, 1, XSAVE_ERR_PTR, NO, 0, FALSE) \ FLAG( 88, 0, EBX, 12, 1, LEAF88_IBPB, ANY, 9, FALSE) \ CPUID_88_EBX_14 \ CPUID_88_EBX_15 \ CPUID_88_EBX_16 \ CPUID_88_EBX_17 \ CPUID_88_EBX_18 \ FIELD( 88, 0, ECX, 0, 8, LEAF88_CORE_COUNT, YES, 4, FALSE) \ FIELD( 88, 0, ECX, 12, 4, APICID_COREID_SIZE, YES, 7, FALSE) \ FIELD( 88, 0, ECX, 16, 2, PERFTSC_SIZE, NO, 0, FALSE) #define CPUID_8A_EDX_11 \ FLAG( 8A, 0, 
EDX, 11, 1, SVMEDX_RSVD1, NO, 0, FALSE) #define CPUID_8A_EDX_14 \ FLAG( 8A, 0, EDX, 14, 1, SVMEDX_RSVD2, NO, 0, FALSE) /* LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_8A \ FIELD( 8A, 0, EAX, 0, 8, SVM_REVISION, YES, 4, FALSE) \ FLAG( 8A, 0, EAX, 8, 1, SVM_HYPERVISOR, NO, 0, FALSE) \ FIELD( 8A, 0, EAX, 9, 23, SVMEAX_RSVD, NO, 0, FALSE) \ FIELD( 8A, 0, EBX, 0, 32, SVM_NUM_ASIDS, YES, 7, FALSE) \ FIELD( 8A, 0, ECX, 0, 32, SVMECX_RSVD, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 0, 1, SVM_NPT, YES, 7, FALSE) \ FLAG( 8A, 0, EDX, 1, 1, SVM_LBR, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 2, 1, SVM_LOCK, ANY, 7, FALSE) \ FLAG( 8A, 0, EDX, 3, 1, SVM_NRIP, YES, 7, FALSE) \ FLAG( 8A, 0, EDX, 4, 1, SVM_TSC_RATE_MSR, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 5, 1, SVM_VMCB_CLEAN, YES, 7, FALSE) \ FLAG( 8A, 0, EDX, 6, 1, SVM_FLUSH_BY_ASID, YES, 7, FALSE) \ FLAG( 8A, 0, EDX, 7, 1, SVM_DECODE_ASSISTS, YES, 7, FALSE) \ FIELD( 8A, 0, EDX, 8, 2, SVMEDX_RSVD0, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 10, 1, SVM_PAUSE_FILTER, NO, 0, FALSE) \ CPUID_8A_EDX_11 \ FLAG( 8A, 0, EDX, 12, 1, SVM_PAUSE_THRESHOLD, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 13, 1, SVM_AVIC, NO, 0, FALSE) \ CPUID_8A_EDX_14 \ FLAG( 8A, 0, EDX, 15, 1, SVM_V_VMSAVE_VMLOAD, NO, 0, FALSE) \ FLAG( 8A, 0, EDX, 16, 1, SVM_VGIF, NO, 0, FALSE) \ FIELD( 8A, 0, EDX, 17, 15, SVMEDX_RSVD, NO, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_819 \ FIELD(819, 0, EAX, 0, 12, L1_ITLB_ENTRIES_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EAX, 12, 4, L1_ITLB_ASSOC_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EAX, 16, 12, L1_DTLB_ENTRIES_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EAX, 28, 4, L1_DTLB_ASSOC_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EBX, 0, 12, L2_ITLB_ENTRIES_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EBX, 12, 4, L2_ITLB_ASSOC_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EBX, 16, 12, L2_DTLB_ENTRIES_1G_PGS, NA, 0, FALSE) \ FIELD(819, 0, EBX, 28, 4, L2_DTLB_ASSOC_1G_PGS, NA, 0, FALSE) /* LEVEL, 
SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81A \ FLAG( 81A, 0, EAX, 0, 1, FP128, NA, 0, FALSE) \ FLAG( 81A, 0, EAX, 1, 1, MOVU, NA, 0, FALSE) \ FLAG( 81A, 0, EAX, 2, 1, FP256, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81B \ FLAG( 81B, 0, EAX, 0, 1, IBS_FFV, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 1, 1, IBS_FETCHSAM, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 2, 1, IBS_OPSAM, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 3, 1, RW_OPCOUNT, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 4, 1, OPCOUNT, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 5, 1, BRANCH_TARGET_ADDR, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 6, 1, OPCOUNT_EXT, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 7, 1, RIP_INVALID_CHECK, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 8, 1, OP_BRN_FUSE, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 9, 1, IBS_FETCH_CTL_EXTD, NA, 0, FALSE) \ FLAG( 81B, 0, EAX, 10, 1, IBS_OP_DATA4, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81C \ FLAG( 81C, 0, EAX, 0, 1, LWP_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 1, 1, LWP_VAL_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 2, 1, LWP_IRE_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 3, 1, LWP_BRE_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 4, 1, LWP_DME_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 5, 1, LWP_CNH_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 6, 1, LWP_RNH_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 29, 1, LWP_CONT_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 30, 1, LWP_PTSC_AVAIL, NA, 0, FALSE) \ FLAG( 81C, 0, EAX, 31, 1, LWP_INT_AVAIL, NA, 0, FALSE) \ FIELD(81C, 0, EBX, 0, 8, LWP_CB_SIZE, NA, 0, FALSE) \ FIELD(81C, 0, EBX, 8, 8, LWP_EVENT_SIZE, NA, 0, FALSE) \ FIELD(81C, 0, EBX, 16, 8, LWP_MAX_EVENTS, NA, 0, FALSE) \ FIELD(81C, 0, EBX, 24, 8, LWP_EVENT_OFFSET, NA, 0, FALSE) \ FIELD(81C, 0, ECX, 0, 4, LWP_LATENCY_MAX, NA, 0, FALSE) \ FLAG( 81C, 0, ECX, 5, 1, LWP_DATA_ADDR_VALID, NA, 0, FALSE) \ FIELD(81C, 0, ECX, 6, 3, LWP_LATENCY_ROUND, 
NA, 0, FALSE) \ FIELD(81C, 0, ECX, 9, 7, LWP_VERSION, NA, 0, FALSE) \ FIELD(81C, 0, ECX, 16, 8, LWP_MIN_BUF_SIZE, NA, 0, FALSE) \ FLAG( 81C, 0, ECX, 28, 1, LWP_BRANCH_PRED, NA, 0, FALSE) \ FLAG( 81C, 0, ECX, 29, 1, LWP_IP_FILTERING, NA, 0, FALSE) \ FLAG( 81C, 0, ECX, 30, 1, LWP_CACHE_LEVEL, NA, 0, FALSE) \ FLAG( 81C, 0, ECX, 31, 1, LWP_CACHE_LATENCY, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 0, 1, LWP_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 1, 1, LWP_VAL_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 2, 1, LWP_IRE_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 3, 1, LWP_BRE_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 4, 1, LWP_DME_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 5, 1, LWP_CNH_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 6, 1, LWP_RNH_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 29, 1, LWP_CONT_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 30, 1, LWP_PTSC_SUPPORTED, NA, 0, FALSE) \ FLAG( 81C, 0, EDX, 31, 1, LWP_INT_SUPPORTED, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81D \ FIELD(81D, 0, EAX, 0, 5, LEAF81D_CACHE_TYPE, NA, 0, FALSE) \ FIELD(81D, 0, EAX, 5, 3, LEAF81D_CACHE_LEVEL, NA, 0, FALSE) \ FLAG( 81D, 0, EAX, 8, 1, LEAF81D_CACHE_SELF_INIT, NA, 0, FALSE) \ FLAG( 81D, 0, EAX, 9, 1, LEAF81D_CACHE_FULLY_ASSOC, NA, 0, FALSE) \ FIELD(81D, 0, EAX, 14, 12, LEAF81D_NUM_SHARING_CACHE, NA, 0, FALSE) \ FIELD(81D, 0, EBX, 0, 12, LEAF81D_CACHE_LINE_SIZE, NA, 0, FALSE) \ FIELD(81D, 0, EBX, 12, 10, LEAF81D_CACHE_PHYS_PARTITIONS, NA, 0, FALSE) \ FIELD(81D, 0, EBX, 22, 10, LEAF81D_CACHE_WAYS, NA, 0, FALSE) \ FIELD(81D, 0, ECX, 0, 32, LEAF81D_CACHE_NUM_SETS, NA, 0, FALSE) \ FLAG( 81D, 0, EDX, 0, 1, LEAF81D_CACHE_WBINVD, NA, 0, FALSE) \ FLAG( 81D, 0, EDX, 1, 1, LEAF81D_CACHE_INCLUSIVE, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81E \ FIELD(81E, 0, EAX, 0, 32, EXTENDED_APICID, NA, 0, FALSE) \ FIELD(81E, 0, EBX, 0, 8, COMPUTE_UNIT_ID, 
NA, 0, FALSE) \ FIELD(81E, 0, EBX, 8, 2, CORES_PER_COMPUTE_UNIT, NA, 0, FALSE) \ FIELD(81E, 0, ECX, 0, 8, NODEID_VAL, NA, 0, FALSE) \ FIELD(81E, 0, ECX, 8, 3, NODES_PER_PKG, NA, 0, FALSE) /* LEVEL, SUB-LEVEL, REG, POS, SIZE, NAME, MON SUPP, HWV, CPL3 */ #define CPUID_FIELD_DATA_LEVEL_81F \ FLAG( 81F, 0, EAX, 0, 1, SME, NO, 0, FALSE) \ FLAG( 81F, 0, EAX, 1, 1, SEV, NO, 0, FALSE) \ FLAG( 81F, 0, EAX, 2, 1, PAGE_FLUSH_MSR, NO, 0, FALSE) \ FLAG( 81F, 0, EAX, 3, 1, SEV_ES, NO, 0, FALSE) \ FIELD(81F, 0, EBX, 0, 5, SME_PAGE_TABLE_BIT_NUM, NO, 0, FALSE) \ FIELD(81F, 0, EBX, 6, 6, SME_PHYS_ADDR_SPACE_REDUCTION, NO, 0, FALSE) \ FIELD(81F, 0, ECX, 0, 32, NUM_ENCRYPTED_GUESTS, NO, 0, FALSE) \ FIELD(81F, 0, EDX, 0, 32, SEV_MIN_ASID, NO, 0, FALSE) #define INTEL_CPUID_FIELD_DATA #define AMD_CPUID_FIELD_DATA #define CPUID_FIELD_DATA \ CPUID_FIELD_DATA_LEVEL_0 \ CPUID_FIELD_DATA_LEVEL_1 \ CPUID_FIELD_DATA_LEVEL_2 \ CPUID_FIELD_DATA_LEVEL_4 \ CPUID_FIELD_DATA_LEVEL_5 \ CPUID_FIELD_DATA_LEVEL_6 \ CPUID_FIELD_DATA_LEVEL_7 \ CPUID_FIELD_DATA_LEVEL_A \ CPUID_FIELD_DATA_LEVEL_B \ CPUID_FIELD_DATA_LEVEL_D \ CPUID_FIELD_DATA_LEVEL_F \ CPUID_FIELD_DATA_LEVEL_10 \ CPUID_FIELD_DATA_LEVEL_12 \ CPUID_FIELD_DATA_LEVEL_14 \ CPUID_FIELD_DATA_LEVEL_15 \ CPUID_FIELD_DATA_LEVEL_16 \ CPUID_FIELD_DATA_LEVEL_17 \ CPUID_FIELD_DATA_LEVEL_400 \ CPUID_FIELD_DATA_LEVEL_401 \ CPUID_FIELD_DATA_LEVEL_402 \ CPUID_FIELD_DATA_LEVEL_403 \ CPUID_FIELD_DATA_LEVEL_404 \ CPUID_FIELD_DATA_LEVEL_405 \ CPUID_FIELD_DATA_LEVEL_406 \ CPUID_FIELD_DATA_LEVEL_410 \ CPUID_FIELD_DATA_LEVEL_80 \ CPUID_FIELD_DATA_LEVEL_81 \ CPUID_FIELD_DATA_LEVEL_82 \ CPUID_FIELD_DATA_LEVEL_83 \ CPUID_FIELD_DATA_LEVEL_84 \ CPUID_FIELD_DATA_LEVEL_85 \ CPUID_FIELD_DATA_LEVEL_86 \ CPUID_FIELD_DATA_LEVEL_87 \ CPUID_FIELD_DATA_LEVEL_88 \ CPUID_FIELD_DATA_LEVEL_8A \ CPUID_FIELD_DATA_LEVEL_819 \ CPUID_FIELD_DATA_LEVEL_81A \ CPUID_FIELD_DATA_LEVEL_81B \ CPUID_FIELD_DATA_LEVEL_81C \ CPUID_FIELD_DATA_LEVEL_81D \ CPUID_FIELD_DATA_LEVEL_81E \ 
CPUID_FIELD_DATA_LEVEL_81F \ INTEL_CPUID_FIELD_DATA \ AMD_CPUID_FIELD_DATA /* * Define all field and flag values as an enum. The result is a full * set of values taken from the table above in the form: * * CPUID_<name>_MASK == mask for feature/field * CPUID_<name>_SHIFT == offset of field * * e.g. - CPUID_VIRT_BITS_MASK = 0xff00 * - CPUID_VIRT_BITS_SHIFT = 8 */ #define VMW_BIT_MASK(shift) (0xffffffffu >> (32 - shift)) #define FIELD(lvl, ecxIn, reg, bitpos, size, name, s, hwv, c3) \ CPUID_##name##_SHIFT = bitpos, \ CPUID_##name##_MASK = VMW_BIT_MASK(size) << bitpos, \ CPUID_INTERNAL_SHIFT_##name = bitpos, \ CPUID_INTERNAL_MASK_##name = VMW_BIT_MASK(size) << bitpos, \ CPUID_INTERNAL_REG_##name = CPUID_REG_##reg, \ CPUID_INTERNAL_EAXIN_##name = CPUID_LEVEL_VAL_##lvl, \ CPUID_INTERNAL_ECXIN_##name = ecxIn, \ CPUID_INTERNAL_HWV_##name = hwv, #define FLAG FIELD enum { /* Define data for every CPUID field we have */ CPUID_FIELD_DATA }; #undef VMW_BIT_MASK #undef FIELD #undef FLAG /* * CPUID_MASK -- * CPUID_SHIFT -- * CPUID_ISSET -- * CPUID_GET -- * CPUID_SET -- * CPUID_CLEAR -- * CPUID_SETTO -- * * Accessor macros for all CPUID consts/fields/flags. Level and reg are not * required, but are used to force compile-time asserts which help verify that * the flag is being used on the right CPUID input and result register. * * Note: ASSERT_ON_COMPILE is duplicated rather than factored into its own * macro, because token concatenation does not work as expected if an input is * #defined (e.g. APIC) when macros are nested. Also, compound statements * within parenthes is a GCC extension, so we must use runtime asserts with * other compilers. 
*/ #if defined(__GNUC__) && !defined(__clang__) #define CPUID_MASK(eaxIn, reg, flag) \ ({ \ ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \ CPUID_INTERNAL_MASK_##flag; \ }) #define CPUID_SHIFT(eaxIn, reg, flag) \ ({ \ ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \ CPUID_INTERNAL_SHIFT_##flag; \ }) #define CPUID_ISSET(eaxIn, reg, flag, data) \ ({ \ ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##flag && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \ (((data) & CPUID_INTERNAL_MASK_##flag) != 0); \ }) #define CPUID_GET(eaxIn, reg, field, data) \ ({ \ ASSERT_ON_COMPILE(eaxIn == CPUID_INTERNAL_EAXIN_##field && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \ (((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \ CPUID_INTERNAL_SHIFT_##field); \ }) #else /* * CPUIDCheck -- * * Return val after verifying parameters. */ static INLINE uint32 CPUIDCheck(int32 eaxIn, int32 eaxInCheck, CpuidReg reg, CpuidReg regCheck, uint32 val) { ASSERT(eaxIn == eaxInCheck && reg == regCheck); return val; } #define CPUID_MASK(eaxIn, reg, flag) \ CPUIDCheck(eaxIn, CPUID_INTERNAL_EAXIN_##flag, \ CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \ CPUID_INTERNAL_MASK_##flag) #define CPUID_SHIFT(eaxIn, reg, flag) \ CPUIDCheck(eaxIn, CPUID_INTERNAL_EAXIN_##flag, \ CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \ CPUID_INTERNAL_SHIFT_##flag) #define CPUID_ISSET(eaxIn, reg, flag, data) \ (CPUIDCheck(eaxIn, CPUID_INTERNAL_EAXIN_##flag, \ CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##flag, \ CPUID_INTERNAL_MASK_##flag & (data)) != 0) #define CPUID_GET(eaxIn, reg, field, data) \ CPUIDCheck(eaxIn, CPUID_INTERNAL_EAXIN_##field, \ CPUID_REG_##reg, (CpuidReg)CPUID_INTERNAL_REG_##field, \ ((uint32)(data) & CPUID_INTERNAL_MASK_##field) >> \ CPUID_INTERNAL_SHIFT_##field) #endif #define CPUID_SET(eaxIn, reg, flag, dataPtr) \ do { 
\ ASSERT_ON_COMPILE( \ (uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \ *(dataPtr) |= CPUID_INTERNAL_MASK_##flag; \ } while (0) #define CPUID_CLEAR(eaxIn, reg, flag, dataPtr) \ do { \ ASSERT_ON_COMPILE( \ (uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##flag && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##flag); \ *(dataPtr) &= ~CPUID_INTERNAL_MASK_##flag; \ } while (0) #define CPUID_SETTO(eaxIn, reg, field, dataPtr, val) \ do { \ uint32 _v = val; \ uint32 *_d = dataPtr; \ ASSERT_ON_COMPILE( \ (uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \ *_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \ (_v << CPUID_INTERNAL_SHIFT_##field); \ ASSERT(_v == (*_d & CPUID_INTERNAL_MASK_##field) >> \ CPUID_INTERNAL_SHIFT_##field); \ } while (0) #define CPUID_SETTO_SAFE(eaxIn, reg, field, dataPtr, val) \ do { \ uint32 _v = val & \ (CPUID_INTERNAL_MASK_##field >> CPUID_INTERNAL_SHIFT_##field); \ uint32 *_d = dataPtr; \ ASSERT_ON_COMPILE( \ (uint32)eaxIn == (uint32)CPUID_INTERNAL_EAXIN_##field && \ CPUID_REG_##reg == (CpuidReg)CPUID_INTERNAL_REG_##field); \ *_d = (*_d & ~CPUID_INTERNAL_MASK_##field) | \ (_v << CPUID_INTERNAL_SHIFT_##field); \ } while (0) /* * Definitions of various fields' values and more complicated * macros/functions for reading cpuid fields. 
*/ #define CPUID_FAMILY_EXTENDED 15 /* Effective Intel CPU Families */ #define CPUID_FAMILY_486 4 #define CPUID_FAMILY_P5 5 #define CPUID_FAMILY_P6 6 #define CPUID_FAMILY_P4 15 /* Effective AMD CPU Families */ #define CPUID_FAMILY_5x86 0x4 #define CPUID_FAMILY_K5 0x5 #define CPUID_FAMILY_K6 0x5 #define CPUID_FAMILY_K7 0x6 #define CPUID_FAMILY_K8 0xf #define CPUID_FAMILY_K8L 0x10 #define CPUID_FAMILY_K8MOBILE 0x11 #define CPUID_FAMILY_LLANO 0x12 #define CPUID_FAMILY_BOBCAT 0x14 #define CPUID_FAMILY_BULLDOZER 0x15 // BD PD SR EX #define CPUID_FAMILY_KYOTO 0x16 // Note: Jaguar microarch #define CPUID_FAMILY_ZEN 0x17 /* Effective VIA CPU Families */ #define CPUID_FAMILY_C7 6 /* Effective Hygon CPU Families. */ #define CPUID_FAMILY_DHYANA 0x18 /* Intel model information */ #define CPUID_MODEL_PPRO 1 #define CPUID_MODEL_PII_03 3 #define CPUID_MODEL_PII_05 5 #define CPUID_MODEL_CELERON_06 6 #define CPUID_MODEL_PM_09 9 #define CPUID_MODEL_PM_0D 13 #define CPUID_MODEL_PM_0E 14 // Yonah / Sossaman #define CPUID_MODEL_CORE_0F 15 // Conroe / Merom #define CPUID_MODEL_CORE_17 0x17 // Penryn #define CPUID_MODEL_NEHALEM_1A 0x1a // Nehalem / Gainestown #define CPUID_MODEL_ATOM_1C 0x1c // Silverthorne / Diamondville #define CPUID_MODEL_CORE_1D 0x1d // Dunnington #define CPUID_MODEL_NEHALEM_1E 0x1e // Lynnfield #define CPUID_MODEL_NEHALEM_1F 0x1f // Havendale #define CPUID_MODEL_NEHALEM_25 0x25 // Westmere / Clarkdale #define CPUID_MODEL_ATOM_26 0x26 // Lincroft #define CPUID_MODEL_ATOM_27 0x27 // Saltwell #define CPUID_MODEL_SANDYBRIDGE_2A 0x2a // Sandybridge (desktop/mobile) #define CPUID_MODEL_NEHALEM_2C 0x2c // Westmere-EP #define CPUID_MODEL_SANDYBRIDGE_2D 0x2d // Sandybridge-EP #define CPUID_MODEL_NEHALEM_2E 0x2e // Nehalem-EX #define CPUID_MODEL_NEHALEM_2F 0x2f // Westmere-EX #define CPUID_MODEL_ATOM_35 0x35 // Cloverview #define CPUID_MODEL_ATOM_36 0x36 // Cedarview #define CPUID_MODEL_ATOM_37 0x37 // Bay Trail #define CPUID_MODEL_SANDYBRIDGE_3A 0x3a // Ivy Bridge #define 
CPUID_MODEL_HASWELL_3C 0x3c // Haswell DT #define CPUID_MODEL_BROADWELL_3D 0x3d // Broadwell-Ult #define CPUID_MODEL_SANDYBRIDGE_3E 0x3e // Ivy Bridge-EP #define CPUID_MODEL_HASWELL_3F 0x3f // Haswell EP/EN/EX #define CPUID_MODEL_HASWELL_45 0x45 // Haswell Ultrathin #define CPUID_MODEL_HASWELL_46 0x46 // Haswell (Crystal Well) #define CPUID_MODEL_BROADWELL_47 0x47 // Broadwell (Denlow) #define CPUID_MODEL_ATOM_4A 0x4a // Future Silvermont #define CPUID_MODEL_ATOM_4C 0x4c // Airmont #define CPUID_MODEL_ATOM_4D 0x4d // Avoton #define CPUID_MODEL_SKYLAKE_4E 0x4e // Skylake-Y / Kabylake U/Y ES #define CPUID_MODEL_BROADWELL_4F 0x4f // Broadwell EP/EN/EX #define CPUID_MODEL_SKYLAKE_55 0x55 // Skylake EP/EN/EX #define CPUID_MODEL_BROADWELL_56 0x56 // Broadwell DE #define CPUID_MODEL_KNL_57 0x57 // Knights Landing #define CPUID_MODEL_ATOM_5A 0x5a // Future Silvermont #define CPUID_MODEL_ATOM_5D 0x5d // Future Silvermont #define CPUID_MODEL_SKYLAKE_5E 0x5e // Skylake-S / Kabylake S/H ES #define CPUID_MODEL_ATOM_5F 0x5f // Denverton #define CPUID_MODEL_KNM_85 0x85 // Knights Mill #define CPUID_MODEL_KABYLAKE_8E 0x8e // Kabylake U/Y QS #define CPUID_MODEL_KABYLAKE_9E 0x9e // Kabylake S/H QS /* Intel stepping information */ #define CPUID_STEPPING_KABYLAKE_ES 0x8 // Kabylake S/H/U/Y ES #define CPUID_STEPPING_COFFEELAKE_A 0xA // Coffeelake U/S/H #define CPUID_STEPPING_COFFEELAKE_B 0xB // Coffeelake S/H #define CPUID_MODEL_PIII_07 7 #define CPUID_MODEL_PIII_08 8 #define CPUID_MODEL_PIII_0A 10 /* AMD model information */ #define CPUID_MODEL_BARCELONA_02 0x02 // Barcelona (Opteron & Phenom) #define CPUID_MODEL_SHANGHAI_04 0x04 // Shanghai RB #define CPUID_MODEL_SHANGHAI_05 0x05 // Shanghai BL #define CPUID_MODEL_SHANGHAI_06 0x06 // Shanghai DA #define CPUID_MODEL_ISTANBUL_MAGNY_08 0x08 // Istanbul (6 core) & Magny-cours (12) HY #define CPUID_MODEL_ISTANBUL_MAGNY_09 0x09 // HY - G34 package #define CPUID_MODEL_PHAROAH_HOUND_0A 0x0A // Pharoah Hound #define CPUID_MODEL_PILEDRIVER_1F 
0x1F // Max piledriver model defined in BKDG #define CPUID_MODEL_PILEDRIVER_10 0x10 // family == CPUID_FAMILY_BULLDOZER #define CPUID_MODEL_PILEDRIVER_02 0x02 // family == CPUID_FAMILY_BULLDOZER #define CPUID_MODEL_OPTERON_REVF_41 0x41 // family == CPUID_FAMILY_K8 #define CPUID_MODEL_KYOTO_00 0x00 // family == CPUID_FAMILY_KYOTO #define CPUID_MODEL_STEAMROLLER_3F 0x3F // Max Steamroller model defined in BKDG #define CPUID_MODEL_STEAMROLLER_30 0x30 // family == CPUID_FAMILY_BULLDOZER #define CPUID_MODEL_EXCAVATOR_60 0x60 // family == CPUID_FAMILY_BULLDOZER #define CPUID_MODEL_EXCAVATOR_6F 0x6F // Max Excavator model defined in BKDG #define CPUID_MODEL_ZEN_00 0x00 // family == CPUID_FAMILY_ZEN #define CPUID_MODEL_ZEN_1F 0x1F // Max Zen model defined in BKDG /* VIA model information */ #define CPUID_MODEL_NANO 15 // Isaiah /* Hygon model information. */ #define CPUID_MODEL_DHYANA_A 0 // Dhyana A /* *---------------------------------------------------------------------- * * CPUID_IsVendor{AMD,Intel,VIA,Hygon} -- * * Determines if the vendor string in cpuid id0 is from * {AMD,Intel,VIA,Hygon}. * * Results: * True iff vendor string is CPUID_{AMD,INTEL,VIA,HYGON}_VENDOR_STRING * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static INLINE Bool CPUID_IsRawVendor(CPUIDRegs *id0, const char* vendor) { // hard to get strcmp() in some environments, so do it in the raw return (id0->ebx == *(const uint32 *) (vendor + 0) && id0->ecx == *(const uint32 *) (vendor + 4) && id0->edx == *(const uint32 *) (vendor + 8)); } static INLINE Bool CPUID_IsVendorAMD(CPUIDRegs *id0) { return CPUID_IsRawVendor(id0, CPUID_AMD_VENDOR_STRING); } static INLINE Bool CPUID_IsVendorIntel(CPUIDRegs *id0) { return CPUID_IsRawVendor(id0, CPUID_INTEL_VENDOR_STRING); } static INLINE Bool CPUID_IsVendorVIA(CPUIDRegs *id0) { return CPUID_IsRawVendor(id0, CPUID_VIA_VENDOR_STRING); } static INLINE Bool CPUID_IsVendorHygon(CPUIDRegs *id0) { return CPUID_IsRawVendor(id0, CPUID_HYGON_VENDOR_STRING); } static INLINE uint32 CPUID_EFFECTIVE_FAMILY(uint32 v) /* %eax from CPUID with %eax=1. */ { uint32 f = CPUID_GET(1, EAX, FAMILY, v); return f != CPUID_FAMILY_EXTENDED ? f : f + CPUID_GET(1, EAX, EXTENDED_FAMILY, v); } /* Normally only used when FAMILY==CPUID_FAMILY_EXTENDED, but Intel is * now using the extended model field for FAMILY==CPUID_FAMILY_P6 to * refer to the newer Core2 CPUs */ static INLINE uint32 CPUID_EFFECTIVE_MODEL(uint32 v) /* %eax from CPUID with %eax=1. */ { uint32 m = CPUID_GET(1, EAX, MODEL, v); uint32 em = CPUID_GET(1, EAX, EXTENDED_MODEL, v); return m + (em << 4); } static INLINE uint32 CPUID_EFFECTIVE_STEPPING(uint32 v) /* %eax from CPUID with %eax=1. */ { return CPUID_GET(1, EAX, STEPPING, v); } /* * Notice that CPUID families for Intel and AMD overlap. The following macros * should only be used AFTER the manufacturer has been established (through * the use of CPUID standard function 0). 
*/ static INLINE Bool CPUID_FAMILY_IS_486(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_486; } static INLINE Bool CPUID_FAMILY_IS_P5(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P5; } static INLINE Bool CPUID_FAMILY_IS_P6(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P6; } static INLINE Bool CPUID_FAMILY_IS_PENTIUM4(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_P4; } /* * Intel Pentium M processors are Yonah/Sossaman or an older P-M */ static INLINE Bool CPUID_UARCH_IS_PENTIUM_M(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_09 || CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0D || CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PM_0E); } /* * Intel Core processors are Merom, Conroe, Woodcrest, Clovertown, * Penryn, Dunnington, Kentsfield, Yorktown, Harpertown, ........ */ static INLINE Bool CPUID_UARCH_IS_CORE(uint32 v) // IN: %eax from CPUID with %eax=1. { uint32 model = CPUID_EFFECTIVE_MODEL(v); /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && model >= CPUID_MODEL_CORE_0F && (model < CPUID_MODEL_NEHALEM_1A || model == CPUID_MODEL_CORE_1D); } /* * Intel Nehalem processors are: Nehalem, Gainestown, Lynnfield, Clarkdale. */ static INLINE Bool CPUID_UARCH_IS_NEHALEM(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_NEHALEM_1A || effectiveModel == CPUID_MODEL_NEHALEM_1E || effectiveModel == CPUID_MODEL_NEHALEM_1F || effectiveModel == CPUID_MODEL_NEHALEM_25 || effectiveModel == CPUID_MODEL_NEHALEM_2C || effectiveModel == CPUID_MODEL_NEHALEM_2E || effectiveModel == CPUID_MODEL_NEHALEM_2F); } static INLINE Bool CPUID_UARCH_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1. 
{ /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A || effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D || effectiveModel == CPUID_MODEL_SANDYBRIDGE_3E || effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A); } static INLINE Bool CPUID_MODEL_IS_BROADWELL(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_BROADWELL_3D || effectiveModel == CPUID_MODEL_BROADWELL_47 || effectiveModel == CPUID_MODEL_BROADWELL_4F || effectiveModel == CPUID_MODEL_BROADWELL_56); } static INLINE Bool CPUID_MODEL_IS_HASWELL(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_HASWELL_3C || effectiveModel == CPUID_MODEL_HASWELL_3F || effectiveModel == CPUID_MODEL_HASWELL_45 || effectiveModel == CPUID_MODEL_HASWELL_46); } static INLINE Bool CPUID_MODEL_IS_SKYLAKE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && ((CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SKYLAKE_5E && CPUID_EFFECTIVE_STEPPING(v) != CPUID_STEPPING_KABYLAKE_ES) || CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SKYLAKE_55 || (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SKYLAKE_4E && CPUID_EFFECTIVE_STEPPING(v) != CPUID_STEPPING_KABYLAKE_ES)); } static INLINE Bool CPUID_MODEL_IS_COFFEELAKE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. 
*/ return CPUID_FAMILY_IS_P6(v) && ((CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KABYLAKE_9E && (CPUID_EFFECTIVE_STEPPING(v) == CPUID_STEPPING_COFFEELAKE_A || CPUID_EFFECTIVE_STEPPING(v) == CPUID_STEPPING_COFFEELAKE_B)) || (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KABYLAKE_8E && CPUID_EFFECTIVE_STEPPING(v) == CPUID_STEPPING_COFFEELAKE_A)); } static INLINE Bool CPUID_MODEL_IS_KABYLAKE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && !CPUID_MODEL_IS_COFFEELAKE(v) && (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KABYLAKE_9E || CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KABYLAKE_8E || (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SKYLAKE_5E && CPUID_EFFECTIVE_STEPPING(v) == CPUID_STEPPING_KABYLAKE_ES) || (CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_SKYLAKE_4E && CPUID_EFFECTIVE_STEPPING(v) == CPUID_STEPPING_KABYLAKE_ES)); } static INLINE Bool CPUID_UARCH_IS_SKYLAKE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && (CPUID_MODEL_IS_COFFEELAKE(v) || CPUID_MODEL_IS_KABYLAKE(v) || CPUID_MODEL_IS_SKYLAKE(v)); } static INLINE Bool CPUID_UARCH_IS_HASWELL(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && (CPUID_MODEL_IS_BROADWELL(v) || CPUID_MODEL_IS_HASWELL(v)); } static INLINE Bool CPUID_MODEL_IS_CENTERTON(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_1C; } static INLINE Bool CPUID_MODEL_IS_AVOTON(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_4D; } static INLINE Bool CPUID_MODEL_IS_BAYTRAIL(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. 
*/ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_37; } static INLINE Bool CPUID_UARCH_IS_SILVERMONT(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && (CPUID_MODEL_IS_AVOTON(v) || CPUID_MODEL_IS_BAYTRAIL(v)); } static INLINE Bool CPUID_MODEL_IS_DENVERTON(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_ATOM_5F; } static INLINE Bool CPUID_MODEL_IS_WESTMERE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_NEHALEM_25 || // Clarkdale effectiveModel == CPUID_MODEL_NEHALEM_2C || // Westmere-EP effectiveModel == CPUID_MODEL_NEHALEM_2F); // Westmere-EX } static INLINE Bool CPUID_MODEL_IS_SANDYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && (effectiveModel == CPUID_MODEL_SANDYBRIDGE_2A || effectiveModel == CPUID_MODEL_SANDYBRIDGE_2D); } static INLINE Bool CPUID_MODEL_IS_IVYBRIDGE(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ uint32 effectiveModel = CPUID_EFFECTIVE_MODEL(v); return CPUID_FAMILY_IS_P6(v) && ( effectiveModel == CPUID_MODEL_SANDYBRIDGE_3E || effectiveModel == CPUID_MODEL_SANDYBRIDGE_3A); } static INLINE Bool CPUID_MODEL_IS_KNIGHTS_LANDING(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. */ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KNL_57; } static INLINE Bool CPUID_MODEL_IS_KNIGHTS_MILL(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is Intel. 
*/ return CPUID_FAMILY_IS_P6(v) && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_KNM_85; } static INLINE Bool CPUID_FAMILY_IS_K7(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K7; } static INLINE Bool CPUID_FAMILY_IS_K8(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8; } /* *---------------------------------------------------------------------- * * CPUID_FAMILY_IS_K8EXT -- * * Return TRUE for family K8 with effective model >= 0x10. * *---------------------------------------------------------------------- */ static INLINE Bool CPUID_FAMILY_IS_K8EXT(uint32 eax) { return CPUID_FAMILY_IS_K8(eax) && CPUID_GET(1, EAX, EXTENDED_MODEL, eax) != 0; } static INLINE Bool CPUID_FAMILY_IS_K8L(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8L || CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO; } static INLINE Bool CPUID_FAMILY_IS_LLANO(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_LLANO; } static INLINE Bool CPUID_FAMILY_IS_K8MOBILE(uint32 eax) { /* Essentially a K8 (not K8L) part, but with mobile features. */ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_K8MOBILE; } static INLINE Bool CPUID_FAMILY_IS_K8STAR(uint32 eax) { /* * Read function name as "K8*", as in wildcard. * Matches K8 or K8L or K8MOBILE */ return CPUID_FAMILY_IS_K8(eax) || CPUID_FAMILY_IS_K8L(eax) || CPUID_FAMILY_IS_K8MOBILE(eax); } static INLINE Bool CPUID_FAMILY_IS_BOBCAT(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BOBCAT; } static INLINE Bool CPUID_FAMILY_IS_BULLDOZER(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER; } static INLINE Bool CPUID_FAMILY_IS_KYOTO(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_KYOTO; } static INLINE Bool CPUID_FAMILY_IS_ZEN(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_ZEN; } /* * AMD Barcelona (of either Opteron or Phenom kind). */ static INLINE Bool CPUID_MODEL_IS_BARCELONA(uint32 v) // IN: %eax from CPUID with %eax=1. 
{ /* Assumes the CPU manufacturer is AMD. */ return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_BARCELONA_02; } static INLINE Bool CPUID_MODEL_IS_SHANGHAI(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is AMD. */ return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L && (CPUID_MODEL_SHANGHAI_04 <= CPUID_EFFECTIVE_MODEL(v) && CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_SHANGHAI_06); } static INLINE Bool CPUID_MODEL_IS_ISTANBUL_MAGNY(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is AMD. */ return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L && (CPUID_MODEL_ISTANBUL_MAGNY_08 <= CPUID_EFFECTIVE_MODEL(v) && CPUID_EFFECTIVE_MODEL(v) <= CPUID_MODEL_ISTANBUL_MAGNY_09); } static INLINE Bool CPUID_MODEL_IS_PHAROAH_HOUND(uint32 v) // IN: %eax from CPUID with %eax=1. { /* Assumes the CPU manufacturer is AMD. */ return CPUID_EFFECTIVE_FAMILY(v) == CPUID_FAMILY_K8L && CPUID_EFFECTIVE_MODEL(v) == CPUID_MODEL_PHAROAH_HOUND_0A; } static INLINE Bool CPUID_MODEL_IS_BULLDOZER(uint32 eax) { /* * Bulldozer is models of family 0x15 that are below 10 excluding * Piledriver 02. */ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER && CPUID_EFFECTIVE_MODEL(eax) < CPUID_MODEL_PILEDRIVER_10 && CPUID_EFFECTIVE_MODEL(eax) != CPUID_MODEL_PILEDRIVER_02; } static INLINE Bool CPUID_MODEL_IS_PILEDRIVER(uint32 eax) { /* Piledriver is models 0x02 & 0x10 of family 0x15 (so far). */ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER && ((CPUID_EFFECTIVE_MODEL(eax) >= CPUID_MODEL_PILEDRIVER_10 && CPUID_EFFECTIVE_MODEL(eax) <= CPUID_MODEL_PILEDRIVER_1F) || CPUID_EFFECTIVE_MODEL(eax) == CPUID_MODEL_PILEDRIVER_02); } static INLINE Bool CPUID_MODEL_IS_STEAMROLLER(uint32 eax) { /* Steamroller is model 0x30 of family 0x15 (so far). 
*/ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER && (CPUID_EFFECTIVE_MODEL(eax) >= CPUID_MODEL_STEAMROLLER_30 && CPUID_EFFECTIVE_MODEL(eax) <= CPUID_MODEL_STEAMROLLER_3F); } static INLINE Bool CPUID_MODEL_IS_EXCAVATOR(uint32 eax) { /* Excavator is model 0x60 of family 0x15 (so far). */ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_BULLDOZER && (CPUID_EFFECTIVE_MODEL(eax) >= CPUID_MODEL_EXCAVATOR_60 && CPUID_EFFECTIVE_MODEL(eax) <= CPUID_MODEL_EXCAVATOR_6F); } static INLINE Bool CPUID_MODEL_IS_KYOTO(uint32 eax) { /* Kyoto is models 0x00 of family 0x16 (so far). */ return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_KYOTO && CPUID_EFFECTIVE_MODEL(eax) == CPUID_MODEL_KYOTO_00; } static INLINE Bool CPUID_MODEL_IS_ZEN(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_ZEN && CPUID_EFFECTIVE_MODEL(eax) <= CPUID_MODEL_ZEN_1F; } static INLINE Bool CPUID_FAMILY_IS_DHYANA(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_DHYANA; } static INLINE Bool CPUID_MODEL_IS_DHYANA_A(uint32 eax) { return CPUID_EFFECTIVE_FAMILY(eax) == CPUID_FAMILY_DHYANA && CPUID_EFFECTIVE_MODEL(eax) == CPUID_MODEL_DHYANA_A; } #define CPUID_TYPE_PRIMARY 0 #define CPUID_TYPE_OVERDRIVE 1 #define CPUID_TYPE_SECONDARY 2 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_NULL 0 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_DATA 1 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_INST 2 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_TYPE_UNIF 3 #define CPUID_LEAF4_CACHE_TYPE_NULL 0 #define CPUID_LEAF4_CACHE_TYPE_DATA 1 #define CPUID_LEAF4_CACHE_TYPE_INST 2 #define CPUID_LEAF4_CACHE_TYPE_UNIF 3 #define CPUID_LEAF4_CACHE_INDEXING_DIRECT 0 #define CPUID_LEAF4_CACHE_INDEXING_COMPLEX 1 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_SELF_INIT 0x00000100 #define CPUID_INTEL_ID4EAX_LEAF4_CACHE_FULLY_ASSOC 0x00000200 #define CPUID_LEAF4_CACHE_SELF_INIT 0x00000100 #define CPUID_LEAF4_CACHE_FULLY_ASSOC 0x00000200 #define CPUID_INTEL_IDBECX_LEVEL_TYPE_INVALID 0 #define CPUID_INTEL_IDBECX_LEVEL_TYPE_SMT 
1 #define CPUID_INTEL_IDBECX_LEVEL_TYPE_CORE 2 #define CPUID_TOPOLOGY_LEVEL_TYPE_INVALID 0 #define CPUID_TOPOLOGY_LEVEL_TYPE_SMT 1 #define CPUID_TOPOLOGY_LEVEL_TYPE_CORE 2 /* * For certain AMD processors, an lfence instruction is necessary at various * places to ensure ordering. */ static INLINE Bool CPUID_VendorRequiresFence(CpuidVendor vendor) { return vendor == CPUID_VENDOR_AMD; } static INLINE Bool CPUID_VersionRequiresFence(uint32 version) { return CPUID_EFFECTIVE_FAMILY(version) == CPUID_FAMILY_K8 && CPUID_EFFECTIVE_MODEL(version) < 0x40; } static INLINE Bool CPUID_ID0RequiresFence(CPUIDRegs *id0) { if (id0->eax == 0) { return FALSE; } return CPUID_IsVendorAMD(id0); } static INLINE Bool CPUID_ID1RequiresFence(CPUIDRegs *id1) { return CPUID_VersionRequiresFence(id1->eax); } static INLINE Bool CPUID_RequiresFence(CpuidVendor vendor, // IN uint32 version) // IN: %eax from CPUID with %eax=1. { return CPUID_VendorRequiresFence(vendor) && CPUID_VersionRequiresFence(version); } /* * The following low-level functions compute the number of * cores per cpu. They should be used cautiously because * they do not necessarily work on all types of CPUs. * High-level functions that are correct for all CPUs are * available elsewhere: see lib/cpuidInfo/cpuidInfo.c. */ static INLINE uint32 CPUID_IntelCoresPerPackage(uint32 v) /* %eax from CPUID with %eax=4 and %ecx=0. */ { // Note: This is not guaranteed to work on older Intel CPUs. return 1 + CPUID_GET(4, EAX, LEAF4_CORE_COUNT, v); } static INLINE uint32 CPUID_AMDCoresPerPackage(uint32 v) /* %ecx from CPUID with %eax=0x80000008. */ { // Note: This is not guaranteed to work on older AMD CPUs. return 1 + CPUID_GET(0x80000008, ECX, LEAF88_CORE_COUNT, v); } /* * Hypervisor CPUID space is 0x400000XX. 
 */
static INLINE Bool
CPUID_IsHypervisorLevel(uint32 level)
{
   /* Hypervisor leaves are 0x400000xx: compare only the upper 24 bits. */
   return (level & 0xffffff00) == 0x40000000;
}

/*
 *----------------------------------------------------------------------
 *
 * CPUID_LevelUsesEcx --
 *
 *      Returns TRUE for leaves that support input ECX != 0 (subleaves).
 *
 *----------------------------------------------------------------------
 */

static INLINE Bool
CPUID_LevelUsesEcx(uint32 level)
{
   switch (level) {
      /* Expands to one "case <leaf>: return <ecxCount> != 0;" per known leaf. */
#define CPUIDLEVEL(t, s, v, c, h) \
      case v:                     \
         return c != 0;
      CPUID_ALL_LEVELS
#undef CPUIDLEVEL
      default:
         return FALSE;
   }
}

/*
 *----------------------------------------------------------------------
 *
 * CPUID_IsValid*Subleaf --
 *
 *      Functions to determine the last subleaf for the level specified
 *
 *----------------------------------------------------------------------
 */

static INLINE Bool
CPUID_IsValidBSubleaf(uint32 ebx)  // IN: %ebx = cpuid.b.sublevel.ebx
{
   /* Leaf 0xB reports an all-zero EBX for invalid sublevels. */
   return ebx != 0;
}

static INLINE Bool
CPUID_IsValid4Subleaf(uint32 eax)  // IN: %eax = cpuid.4.sublevel.eax
{
   /* Leaf 4 reports cache type NULL (EAX == 0) past the last cache level. */
   return eax != 0;
}

static INLINE Bool
CPUID_IsValid7Subleaf(uint32 eax, uint32 subleaf)  // IN: %eax = cpuid.7.0.eax
{
   /*
    * cpuid.7.0.eax is the max ecx (subleaf) index
    */
   return subleaf <= eax;
}

/*
 *----------------------------------------------------------------------
 *
 * CPUID_IsValidDSubleaf --
 *
 *      It is the caller's responsibility to determine if the processor
 *      supports XSAVE and therefore has D sub-leaves.
 *
 *----------------------------------------------------------------------
 */

static INLINE Bool
CPUID_IsValidDSubleaf(uint32 subleaf)  // IN: subleaf to check
{
   return subleaf <= 63;
}

/*
 *----------------------------------------------------------------------
 *
 * CPUID_SupportsMsrPlatformInfo --
 *
 *      Uses vendor and cpuid.1.0.eax to determine if the processor
 *      supports MSR_PLATFORM_INFO.
* *---------------------------------------------------------------------- */ static INLINE Bool CPUID_SupportsMsrPlatformInfo(CpuidVendor vendor, uint32 version) { return vendor == CPUID_VENDOR_INTEL && (CPUID_UARCH_IS_NEHALEM(version) || CPUID_UARCH_IS_SANDYBRIDGE(version) || CPUID_UARCH_IS_HASWELL(version) || CPUID_UARCH_IS_SKYLAKE(version) || CPUID_MODEL_IS_KNIGHTS_LANDING(version) || CPUID_MODEL_IS_DENVERTON(version) || CPUID_UARCH_IS_SILVERMONT(version)); } #ifdef _MSC_VER #pragma warning (pop) #endif #if defined __cplusplus } // extern "C" #endif #endif // _X86CPUID_H_ vmhgfs-only/shared/vm_basic_asm.h 0000444 0000000 0000000 00000071321 13432725350 016075 0 ustar root root /********************************************************* * Copyright (C) 2003-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_asm.h * * Basic asm macros */ #ifndef _VM_BASIC_ASM_H_ #define _VM_BASIC_ASM_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_types.h" #if defined VM_X86_64 #include "vm_basic_asm_x86_common.h" #include "vm_basic_asm_x86_64.h" #elif defined VM_X86_32 #include "vm_basic_asm_x86_common.h" #include "vm_basic_asm_x86.h" #elif defined VM_ARM_32 #include "vm_basic_asm_arm32.h" #define MUL64_NO_ASM 1 #include "mul64.h" #elif defined VM_ARM_64 #include "arm64_basic_defs.h" #include "vm_basic_asm_arm64.h" #else #define MUL64_NO_ASM 1 #include "mul64.h" #endif #if defined __cplusplus extern "C" { #endif /* * Locate most and least significant bit set functions. Use our own name * space to avoid namespace collisions. 
The new names follow a pattern, * <prefix><size><option>, where: * * <prefix> is [lm]ssb (least/most significant bit set) * <size> is size of the argument: 32 (32-bit), 64 (64-bit) or Ptr (pointer) * <option> is for alternative versions of the functions * * NAME FUNCTION BITS FUNC(0) *----- -------- ---- ------- * lssb32_0 LSB set (uint32) 0..31 -1 * mssb32_0 MSB set (uint32) 0..31 -1 * lssb64_0 LSB set (uint64) 0..63 -1 * mssb64_0 MSB set (uint64) 0..63 -1 * lssbPtr_0 LSB set (uintptr_t;32-bit) 0..31 -1 * lssbPtr_0 LSB set (uintptr_t;64-bit) 0..63 -1 * mssbPtr_0 MSB set (uintptr_t;32-bit) 0..31 -1 * mssbPtr_0 MSB set (uintptr_t;64-bit) 0..63 -1 * lssbPtr LSB set (uintptr_t;32-bit) 1..32 0 * lssbPtr LSB set (uintptr_t;64-bit) 1..64 0 * mssbPtr MSB set (uintptr_t;32-bit) 1..32 0 * mssbPtr MSB set (uintptr_t;64-bit) 1..64 0 * lssb32 LSB set (uint32) 1..32 0 * mssb32 MSB set (uint32) 1..32 0 * lssb64 LSB set (uint64) 1..64 0 * mssb64 MSB set (uint64) 1..64 0 */ #ifdef _MSC_VER static INLINE int lssb32_0(const uint32 value) { unsigned long idx; unsigned char ret; if (UNLIKELY(value == 0)) { return -1; } ret = _BitScanForward(&idx, (unsigned long)value); #ifdef __analysis_assume __analysis_assume(ret != 0); #endif #pragma warning(suppress: 6001 6102) // Suppress bogus complaint that idx may be uninitialized in error case return idx; } static INLINE int mssb32_0(const uint32 value) { unsigned long idx; unsigned char ret; if (UNLIKELY(value == 0)) { return -1; } ret = _BitScanReverse(&idx, (unsigned long)value); #ifdef __analysis_assume __analysis_assume(ret != 0); #endif #pragma warning(suppress: 6001 6102) // Suppress bogus complaint that idx may be uninitialized in error case return idx; } static INLINE int lssb64_0(const uint64 value) { if (UNLIKELY(value == 0)) { return -1; } else { #ifdef VM_X86_64 unsigned long idx; unsigned char ret; ret = _BitScanForward64(&idx, (unsigned __int64)value); #ifdef __analysis_assume __analysis_assume(ret != 0); #endif #pragma 
warning(suppress: 6001 6102) // Suppress bogus complaint that idx may be uninitialized in error case return idx; #else /* The coding was chosen to minimize conditionals and operations */ int lowFirstBit = lssb32_0((uint32) value); if (lowFirstBit == -1) { lowFirstBit = lssb32_0((uint32) (value >> 32)); if (lowFirstBit != -1) { return lowFirstBit + 32; } } return lowFirstBit; #endif } } static INLINE int mssb64_0(const uint64 value) { if (UNLIKELY(value == 0)) { return -1; } else { #ifdef VM_X86_64 unsigned long idx; unsigned char ret; ret = _BitScanReverse64(&idx, (unsigned __int64)value); #ifdef __analysis_assume __analysis_assume(ret != 0); #endif #pragma warning(suppress: 6001 6102) // Suppress bogus complaint that idx may be uninitialized in error case return idx; #else /* The coding was chosen to minimize conditionals and operations */ if (value > 0xFFFFFFFFULL) { return 32 + mssb32_0((uint32) (value >> 32)); } return mssb32_0((uint32) value); #endif } } #endif #ifdef __GNUC__ #ifdef VM_X86_ANY #define USE_ARCH_X86_CUSTOM #endif /* ********************************************************** * GCC's intrinsics for the lssb and mssb family produce sub-optimal code, * so we use inline assembly to improve matters. However, GCC cannot * propagate constants through inline assembly, so we help GCC out by * allowing it to use its intrinsics for compile-time constant values. * Some day, GCC will make better code and these can collapse to intrinsics. * * For example, in Decoder_AddressSize, inlined into VVT_GetVTInstrInfo: * __builtin_ffs(a) compiles to: * mov $0xffffffff, %esi * bsf %eax, %eax * cmovz %esi, %eax * sub $0x1, %eax * and $0x7, %eax * * While the code below compiles to: * bsf %eax, %eax * sub $0x1, %eax * * Ideally, GCC should have recognized non-zero input in the first case. 
* Other instances of the intrinsic produce code like * sub $1, %eax; add $1, %eax; clts * ********************************************************** */ #if __GNUC__ < 4 #define FEWER_BUILTINS #endif static INLINE int lssb32_0(uint32 value) { #ifdef USE_ARCH_X86_CUSTOM if (!__builtin_constant_p(value)) { if (UNLIKELY(value == 0)) { return -1; } else { int pos; __asm__ ("bsfl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); return pos; } } #endif return __builtin_ffs(value) - 1; } #ifndef FEWER_BUILTINS static INLINE int mssb32_0(uint32 value) { /* * We must keep the UNLIKELY(...) outside the #if defined ... * because __builtin_clz(0) is undefined according to gcc's * documentation. */ if (UNLIKELY(value == 0)) { return -1; } else { int pos; #ifdef USE_ARCH_X86_CUSTOM if (!__builtin_constant_p(value)) { __asm__ ("bsrl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); return pos; } #endif pos = 32 - __builtin_clz(value) - 1; return pos; } } static INLINE int lssb64_0(const uint64 value) { #ifdef USE_ARCH_X86_CUSTOM if (!__builtin_constant_p(value)) { if (UNLIKELY(value == 0)) { return -1; } else { intptr_t pos; #ifdef VM_X86_64 __asm__ ("bsf %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); #else /* The coding was chosen to minimize conditionals and operations */ pos = lssb32_0((uint32) value); if (pos == -1) { pos = lssb32_0((uint32) (value >> 32)); if (pos != -1) { return pos + 32; } } #endif return pos; } } #endif return __builtin_ffsll(value) - 1; } #endif /* !FEWER_BUILTINS */ #ifdef FEWER_BUILTINS /* GCC 3.3.x does not like __bulitin_clz or __builtin_ffsll. 
*/ static INLINE int mssb32_0(uint32 value) { if (UNLIKELY(value == 0)) { return -1; } else { int pos; __asm__ __volatile__("bsrl %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); return pos; } } static INLINE int lssb64_0(const uint64 value) { if (UNLIKELY(value == 0)) { return -1; } else { intptr_t pos; #ifdef VM_X86_64 __asm__ __volatile__("bsf %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); #else /* The coding was chosen to minimize conditionals and operations */ pos = lssb32_0((uint32) value); if (pos == -1) { pos = lssb32_0((uint32) (value >> 32)); if (pos != -1) { return pos + 32; } } #endif /* VM_X86_64 */ return pos; } } #endif /* FEWER_BUILTINS */ static INLINE int mssb64_0(const uint64 value) { if (UNLIKELY(value == 0)) { return -1; } else { intptr_t pos; #ifdef USE_ARCH_X86_CUSTOM #ifdef VM_X86_64 __asm__ ("bsr %1, %0\n" : "=r" (pos) : "rm" (value) : "cc"); #else /* The coding was chosen to minimize conditionals and operations */ if (value > 0xFFFFFFFFULL) { pos = 32 + mssb32_0((uint32) (value >> 32)); } else { pos = mssb32_0((uint32) value); } #endif #else pos = 64 - __builtin_clzll(value) - 1; #endif return pos; } } #ifdef USE_ARCH_X86_CUSTOM #undef USE_ARCH_X86_CUSTOM #endif #endif // __GNUC__ static INLINE int lssbPtr_0(const uintptr_t value) { #ifdef VM_64BIT return lssb64_0((uint64) value); #else return lssb32_0((uint32) value); #endif } static INLINE int lssbPtr(const uintptr_t value) { return lssbPtr_0(value) + 1; } static INLINE int mssbPtr_0(const uintptr_t value) { #ifdef VM_64BIT return mssb64_0((uint64) value); #else return mssb32_0((uint32) value); #endif } static INLINE int mssbPtr(const uintptr_t value) { return mssbPtr_0(value) + 1; } static INLINE int lssb32(const uint32 value) { return lssb32_0(value) + 1; } static INLINE int mssb32(const uint32 value) { return mssb32_0(value) + 1; } static INLINE int lssb64(const uint64 value) { return lssb64_0(value) + 1; } static INLINE int mssb64(const uint64 value) { return mssb64_0(value) + 1; } 
#ifdef __GNUC__ #if defined(VM_X86_ANY) || defined(VM_ARM_ANY) /* *---------------------------------------------------------------------- * * uint16set -- * * memset a given address with an uint16 value, count times. * * Results: * Pointer to filled memory range. * * Side effects: * As with memset. * *---------------------------------------------------------------------- */ static INLINE void * uint16set(void *dst, uint16 val, size_t count) { #ifdef VM_ARM_32 void *tmpDst = dst; __asm__ __volatile__ ( "cmp %1, #0\n\t" "beq 2f\n" "1:\n\t" "strh %2, [%0], #2\n\t" "subs %1, %1, #1\n\t" "bne 1b\n" "2:" : "+r" (tmpDst), "+r" (count) : "r" (val) : "cc", "memory"); #elif defined(VM_ARM_64) void *tmpDst = dst; uint64 tmpVal = 0; if (count == 0) { return dst; } __asm__ __volatile__ ( "cbz %3, 1f\n\t" // Copy 16 bits twice... "bfm %2, %3, #0, #15\n\t" "lsl %2, %2, #16\n\t" "bfm %2, %3, #0, #15\n\t" // Copy 32 bits from the bottom of the reg. to the top... "lsl %2, %2, #32\n\t" "bfm %2, %2, #32, #63\n" // Copy into dst 8 bytes (4 uint16s) at a time "1:\t" "cmp %1, #4\n\t" "b.lo 2f\n\t" "str %2, [%0], #8\n\t" "sub %1, %1, #4\n\t" "b 1b\n" // Copy into dst 4 bytes at a time "2:\t" "cmp %1, #2\n\t" "b.lo 3f\n\t" "str %w2, [%0], #4\n\t" "sub %1, %1, #2\n\t" "b 2b\n" // We have 1 or zero items left... "3:\t" "cbz %1, 4f\n\t" "strh %w2, [%0]\n" "4:" : "+r" (tmpDst), "+r" (count), "+r" (tmpVal) : "r" (val) : "cc", "memory"); #else size_t dummy0; void *dummy1; __asm__ __volatile__("\t" "cld" "\n\t" "rep ; stosw" "\n" : "=c" (dummy0), "=D" (dummy1) : "0" (count), "1" (dst), "a" (val) : "memory", "cc" ); #endif return dst; } /* *---------------------------------------------------------------------- * * uint32set -- * * memset a given address with an uint32 value, count times. * * Results: * Pointer to filled memory range. * * Side effects: * As with memset. 
* *---------------------------------------------------------------------- */ static INLINE void * uint32set(void *dst, uint32 val, size_t count) { #ifdef VM_ARM_32 void *tmpDst = dst; __asm__ __volatile__ ( "cmp %1, #0\n\t" "beq 2f\n" "1:\n\t" "str %2, [%0], #4\n\t" "subs %1, %1, #1\n\t" "bne 1b\n" "2:" : "+r" (tmpDst), "+r" (count) : "r" (val) : "cc", "memory"); #elif defined(VM_ARM_64) void *tmpDst = dst; if (count == 0) { return dst; } __asm__ __volatile__ ( "cbz %2, 1f\n\t" // Drop our value in the top 32 bits, then copy from there to the bottom "lsl %2, %2, #32\n\t" "bfm %2, %2, #32, #63\n" // Copy four at a time "1:\t" "cmp %1, #16\n\t" "b.lo 2f\n\t" "stp %2, %2, [%0], #16\n\t" "stp %2, %2, [%0], #16\n\t" "stp %2, %2, [%0], #16\n\t" "stp %2, %2, [%0], #16\n\t" "sub %1, %1, #16\n\t" "b 1b\n" // Copy remaining pairs of data "2:\t" "cmp %1, #2\n\t" "b.lo 3f\n\t" "str %2, [%0], #8\n\t" "sub %1, %1, #2\n\t" "b 2b\n" // One or zero values left to copy "3:\t" "cbz %1, 4f\n\t" "str %w2, [%0]\n\t" // No incr "4:" : "+r" (tmpDst), "+r" (count), "+r" (val) : : "cc", "memory"); #else size_t dummy0; void *dummy1; __asm__ __volatile__("\t" "cld" "\n\t" "rep ; stosl" "\n" : "=c" (dummy0), "=D" (dummy1) : "0" (count), "1" (dst), "a" (val) : "memory", "cc" ); #endif return dst; } #else /* unknown system: rely on C to write */ static INLINE void * uint16set(void *dst, uint16 val, size_t count) { size_t i; for (i = 0; i < count; i++) { ((uint16 *) dst)[i] = val; } return dst; } static INLINE void * uint32set(void *dst, uint32 val, size_t count) { size_t i; for (i = 0; i < count; i++) { ((uint32 *) dst)[i] = val; } return dst; } #endif // defined(VM_X86_ANY) || defined(VM_ARM_ANY) #elif defined(_MSC_VER) static INLINE void * uint16set(void *dst, uint16 val, size_t count) { #ifdef VM_X86_64 __stosw((uint16*)dst, val, count); #elif defined(VM_ARM_32) size_t i; for (i = 0; i < count; i++) { ((uint16 *)dst)[i] = val; } #else __asm { pushf; mov ax, val; mov ecx, count; mov edi, dst; 
cld; rep stosw; popf; } #endif return dst; } static INLINE void * uint32set(void *dst, uint32 val, size_t count) { #ifdef VM_X86_64 __stosd((unsigned long*)dst, (unsigned long)val, count); #elif defined(VM_ARM_32) size_t i; for (i = 0; i < count; i++) { ((uint32 *)dst)[i] = val; } #else __asm { pushf; mov eax, val; mov ecx, count; mov edi, dst; cld; rep stosd; popf; } #endif return dst; } #else #error "No compiler defined for uint*set" #endif /* *----------------------------------------------------------------------------- * * Bswap16 -- * * Swap the 2 bytes of "v" as follows: 32 -> 23. * *----------------------------------------------------------------------------- */ static INLINE uint16 Bswap16(uint16 v) { #if defined(VM_ARM_64) __asm__("rev16 %w0, %w0" : "+r"(v)); return v; #else return ((v >> 8) & 0x00ff) | ((v << 8) & 0xff00); #endif } /* *----------------------------------------------------------------------------- * * Bswap32 -- * * Swap the 4 bytes of "v" as follows: 3210 -> 0123. * *----------------------------------------------------------------------------- */ static INLINE uint32 Bswap32(uint32 v) // IN { #if defined(__GNUC__) && defined(VM_X86_ANY) /* Checked against the Intel manual and GCC. --hpreg */ __asm__( "bswap %0" : "=r" (v) : "0" (v) ); return v; #elif defined(VM_ARM_32) && !defined(__ANDROID__) && !defined(_MSC_VER) __asm__("rev %0, %0" : "+r"(v)); return v; #elif defined(VM_ARM_64) __asm__("rev32 %x0, %x0" : "+r"(v)); return v; #else return (v >> 24) | ((v >> 8) & 0xFF00) | ((v & 0xFF00) << 8) | (v << 24) ; #endif } #define Bswap Bswap32 /* *----------------------------------------------------------------------------- * * Bswap64 -- * * Swap the 8 bytes of "v" as follows: 76543210 -> 01234567. 
* *----------------------------------------------------------------------------- */ static INLINE uint64 Bswap64(uint64 v) // IN { #if defined(VM_ARM_64) __asm__("rev %0, %0" : "+r"(v)); return v; #else return ((uint64)Bswap((uint32)v) << 32) | Bswap((uint32)(v >> 32)); #endif } /* * PAUSE is a P4 instruction that improves spinlock power+performance; * on non-P4 IA32 systems, the encoding is interpreted as a REPZ-NOP. * Use volatile to avoid NOP removal. */ static INLINE void PAUSE(void) #if defined(__GNUC__) || defined(VM_ARM_32) { #ifdef VM_ARM_ANY /* * ARM has no instruction to execute "spin-wait loop", just leave it * empty. */ #else __asm__ __volatile__( "pause" :); #endif } #elif defined(_MSC_VER) #ifdef VM_X86_64 { _mm_pause(); } #else /* VM_X86_64 */ #pragma warning( disable : 4035) { __asm _emit 0xf3 __asm _emit 0x90 } #pragma warning (default: 4035) #endif /* VM_X86_64 */ #else /* __GNUC__ */ #error No compiler defined for PAUSE #endif /* * Checked against the Intel manual and GCC --hpreg * * volatile because the tsc always changes without the compiler knowing it. */ static INLINE uint64 RDTSC(void) #ifdef __GNUC__ { #ifdef VM_X86_64 uint64 tscLow; uint64 tscHigh; __asm__ __volatile__( "rdtsc" : "=a" (tscLow), "=d" (tscHigh) ); return tscHigh << 32 | tscLow; #elif defined(VM_X86_32) uint64 tim; __asm__ __volatile__( "rdtsc" : "=A" (tim) ); return tim; #elif defined(VM_ARM_64) #if (defined(VMKERNEL) || defined(VMM)) && !defined(VMK_ARM_EL1) return MRS(CNTPCT_EL0); #else return MRS(CNTVCT_EL0); #endif #else /* * For platform without cheap timer, just return 0. 
 */
   return 0;
#endif
}
#elif defined(_MSC_VER)
#ifdef VM_X86_64
{
   return __rdtsc();
}
#elif defined(VM_ARM_32)
{
   /*
    * We need to do more investigation here to find
    * a Microsoft equivalent of that code.
    */
   NOT_IMPLEMENTED();
   return 0;
}
#else
#pragma warning( disable : 4035)
{
   /* Hand-emitted rdtsc (0x0f 0x31); result is left in edx:eax. */
   __asm _emit 0x0f
   __asm _emit 0x31
}
#pragma warning (default: 4035)
#endif /* VM_X86_64 */
#else /* __GNUC__ */
#error No compiler defined for RDTSC
#endif /* __GNUC__ */

/*
 *-----------------------------------------------------------------------------
 *
 * DEBUGBREAK --
 *
 *      Does an int3 for MSVC / GCC, bkpt/brk for ARM. This is a macro to make
 *      sure int3 is always inlined.
 *
 *-----------------------------------------------------------------------------
 */

#ifdef VM_ARM_32
#define DEBUGBREAK()   __asm__("bkpt")
#elif defined(VM_ARM_64)
#define DEBUGBREAK()   __asm__("brk #0")
#elif defined(_MSC_VER)
#define DEBUGBREAK()   __debugbreak()
#else
#define DEBUGBREAK()   __asm__("int $3")
#endif

/*
 *-----------------------------------------------------------------------------
 *
 * {Clear,Set,Test}Bit{32,64} --
 *
 *      Sets or clears a specified single bit in the provided variable.
 *
 *      The index input value specifies which bit to modify and is 0-based.
 *      Index is truncated by hardware to a 5-bit or 6-bit offset for the
 *      32 and 64-bit flavors, respectively, but input values are not validated
 *      with asserts to avoid include dependencies.
 *
 *      64-bit flavors are not handcrafted for 32-bit builds because they may
 *      defeat compiler optimizations.
* *----------------------------------------------------------------------------- */ static INLINE void SetBit32(uint32 *var, uint32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) __asm__ ( "bts %1, %0" : "+mr" (*var) : "rI" (index) : "cc" ); #elif defined(_MSC_VER) _bittestandset((long *)var, index); #else *var |= (1 << index); #endif } static INLINE void ClearBit32(uint32 *var, uint32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) __asm__ ( "btr %1, %0" : "+mr" (*var) : "rI" (index) : "cc" ); #elif defined(_MSC_VER) _bittestandreset((long *)var, index); #else *var &= ~(1 << index); #endif } static INLINE void SetBit64(uint64 *var, uint64 index) { #if defined(VM_64BIT) && !defined(VM_ARM_64) #ifdef __GNUC__ __asm__ ( "bts %1, %0" : "+mr" (*var) : "rJ" (index) : "cc" ); #elif defined(_MSC_VER) _bittestandset64((__int64 *)var, index); #endif #else *var |= ((uint64)1 << index); #endif } static INLINE void ClearBit64(uint64 *var, uint64 index) { #if defined(VM_64BIT) && !defined(VM_ARM_64) #ifdef __GNUC__ __asm__ ( "btrq %1, %0" : "+mr" (*var) : "rJ" (index) : "cc" ); #elif defined(_MSC_VER) _bittestandreset64((__int64 *)var, index); #endif #else *var &= ~((uint64)1 << index); #endif } static INLINE Bool TestBit32(const uint32 *var, uint32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) Bool bit; __asm__ ( "bt %[index], %[var] \n" "setc %[bit]" : [bit] "=qQm" (bit) : [index] "rI" (index), [var] "r" (*var) : "cc" ); return bit; #else return (*var & (1 << index)) != 0; #endif } static INLINE Bool TestBit64(const uint64 *var, uint64 index) { #if defined __GNUC__ && defined VM_X86_64 Bool bit; __asm__ ( "bt %[index], %[var] \n" "setc %[bit]" : [bit] "=qQm" (bit) : [index] "rJ" (index), [var] "r" (*var) : "cc" ); return bit; #else return (*var & (CONST64U(1) << index)) != 0; #endif } /* *----------------------------------------------------------------------------- * * {Clear,Set,Complement,Test}BitVector -- * * Sets, clears, complements, or tests a 
specified single bit in the * provided array. The index input value specifies which bit to modify * and is 0-based. Bit number can be +-2Gb (+-128MB) relative from 'var' * variable. * * All functions return value of the bit before modification was performed. * *----------------------------------------------------------------------------- */ static INLINE Bool SetBitVector(void *var, int32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) Bool bit; __asm__ ( "bts %2, %1;" "setc %0" : "=qQm" (bit), "+m" (*(uint32 *)var) : "rI" (index) : "memory", "cc" ); return bit; #elif defined(_MSC_VER) return _bittestandset((long *)var, index) != 0; #else Bool retVal = (((uint8 *)var)[index / 8] & (1 << (index % 8))) != 0; ((uint8 *)var)[index / 8] |= 1 << (index % 8); return retVal; #endif } static INLINE Bool ClearBitVector(void *var, int32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) Bool bit; __asm__ ( "btr %2, %1;" "setc %0" : "=qQm" (bit), "+m" (*(uint32 *)var) : "rI" (index) : "memory", "cc" ); return bit; #elif defined(_MSC_VER) return _bittestandreset((long *)var, index) != 0; #else Bool retVal = (((uint8 *)var)[index / 8] & (1 << (index % 8))) != 0; ((uint8 *)var)[index / 8] &= ~(1 << (index % 8)); return retVal; #endif } static INLINE Bool ComplementBitVector(void *var, int32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) Bool bit; __asm__ ( "btc %2, %1;" "setc %0" : "=qQm" (bit), "+m" (*(uint32 *)var) : "rI" (index) : "memory", "cc" ); return bit; #elif defined(_MSC_VER) return _bittestandcomplement((long *)var, index) != 0; #else Bool retVal = (((uint8 *)var)[index / 8] & (1 << (index % 8))) != 0; ((uint8 *)var)[index / 8] ^= ~(1 << (index % 8)); return retVal; #endif } static INLINE Bool TestBitVector(const void *var, int32 index) { #if defined(__GNUC__) && defined(VM_X86_ANY) Bool bit; __asm__ ( "bt %2, %1;" "setc %0" : "=qQm" (bit) : "m" (*(const uint32 *)var), "rI" (index) : "cc" ); return bit; #elif defined _MSC_VER return _bittest((long 
*)var, index) != 0; #else return (((const uint8 *)var)[index / 8] & (1 << (index % 8))) != 0; #endif } /* *----------------------------------------------------------------------------- * RoundUpPow2_{64,32} -- * * Rounds a value up to the next higher power of 2. Returns the original * value if it is a power of 2. The next power of 2 for inputs {0, 1} is 1. * The result is undefined for inputs above {2^63, 2^31} (but equal to 1 * in this implementation). *----------------------------------------------------------------------------- */ static INLINE uint64 RoundUpPow2C64(uint64 value) { if (value <= 1 || value > (CONST64U(1) << 63)) { return 1; // Match the assembly's undefined value for large inputs. } else { return (CONST64U(2) << mssb64_0(value - 1)); } } #if defined(__GNUC__) && defined(VM_X86_64) static INLINE uint64 RoundUpPow2Asm64(uint64 value) { uint64 out = 2; __asm__("lea -1(%[in]), %%rcx;" // rcx = value - 1. Preserve original. "bsr %%rcx, %%rcx;" // rcx = log2(value - 1) if value != 1 // if value == 0, then rcx = 63 // if value == 1 then zf = 1, else zf = 0. "rol %%cl, %[out];" // out = 2 << rcx (if rcx != -1) // = 2^(log2(value - 1) + 1) // if rcx == -1 (value == 0), out = 1 // zf is always unmodified. "cmovz %[in], %[out]" // if value == 1 (zf == 1), write 1 to out. : [out]"+r"(out) : [in]"r"(value) : "%rcx", "cc"); return out; } #endif static INLINE uint64 RoundUpPow2_64(uint64 value) { #if defined(__GNUC__) && defined(VM_X86_64) if (__builtin_constant_p(value)) { return RoundUpPow2C64(value); } else { return RoundUpPow2Asm64(value); } #else return RoundUpPow2C64(value); #endif } static INLINE uint32 RoundUpPow2C32(uint32 value) { if (value <= 1 || value > (1U << 31)) { return 1; // Match the assembly's undefined value for large inputs. } else { return (2 << mssb32_0(value - 1)); } } #ifdef __GNUC__ static INLINE uint32 RoundUpPow2Asm32(uint32 value) { #ifdef VM_ARM_32 uint32 out = 1; // Note: None Thumb only! 
// The value of the argument "value" // will be affected! __asm__("sub %[in], %[in], #1;" // r1 = value - 1 . if value == 0 then r1 = 0xFFFFFFFF "clz %[in], %[in];" // r1 = log2(value - 1) if value != 1 // if value == 0 then r1 = 0 // if value == 1 then r1 = 32 "mov %[out], %[out], ror %[in]" // out = 2^(32 - r1) // if out == 2^32 then out = 1 as it is right rotate : [in]"+r"(value),[out]"+r"(out)); return out; #elif defined(VM_ARM_64) return RoundUpPow2C32(value); #else uint32 out = 2; __asm__("lea -1(%[in]), %%ecx;" // ecx = value - 1. Preserve original. "bsr %%ecx, %%ecx;" // ecx = log2(value - 1) if value != 1 // if value == 0, then ecx = 31 // if value == 1 then zf = 1, else zf = 0. "rol %%cl, %[out];" // out = 2 << ecx (if ecx != -1) // = 2^(log2(value - 1) + 1). // if ecx == -1 (value == 0), out = 1 // zf is always unmodified "cmovz %[in], %[out]" // if value == 1 (zf == 1), write 1 to out. : [out]"+r"(out) : [in]"r"(value) : "%ecx", "cc"); return out; #endif } #endif // __GNUC__ static INLINE uint32 RoundUpPow2_32(uint32 value) { #ifdef __GNUC__ if (__builtin_constant_p(value)) { return RoundUpPow2C32(value); } else { return RoundUpPow2Asm32(value); } #else return RoundUpPow2C32(value); #endif } #if defined __cplusplus } // extern "C" #endif #endif // _VM_BASIC_ASM_H_ vmhgfs-only/shared/vmciKernelAPI3.h 0000444 0000000 0000000 00000003150 13432725350 016161 0 ustar root root /********************************************************* * Copyright (C) 2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vmciKernelAPI3.h -- * * Kernel API (v3) exported from the VMCI host and guest drivers. */ #ifndef __VMCI_KERNELAPI_3_H__ #define __VMCI_KERNELAPI_3_H__ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #include "includeCheck.h" #include "vmciKernelAPI2.h" #if defined __cplusplus extern "C" { #endif /* Define version 3. */ #undef VMCI_KERNEL_API_VERSION #define VMCI_KERNEL_API_VERSION_3 3 #define VMCI_KERNEL_API_VERSION VMCI_KERNEL_API_VERSION_3 /* VMCI Detach Cause API (only available in vmkernel). */ #define VMCI_DETACH_REGULAR 0 #define VMCI_DETACH_VMOTION 1 int vmci_qpair_get_detach_cause(VMCIQPair *qpair, uint8 *cause); #if defined __cplusplus } // extern "C" #endif #endif /* !__VMCI_KERNELAPI_3_H__ */ vmhgfs-only/shared/guest_msg_def.h 0000444 0000000 0000000 00000005647 13432725350 016275 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * guest_msg_def.h -- * * Second layer of the internal communication channel between guest * applications and vmware * */ #ifndef _GUEST_MSG_DEF_H_ #define _GUEST_MSG_DEF_H_ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_USERLEVEL #include "includeCheck.h" /* Basic request types */ typedef enum { MESSAGE_TYPE_OPEN, MESSAGE_TYPE_SENDSIZE, MESSAGE_TYPE_SENDPAYLOAD, MESSAGE_TYPE_RECVSIZE, MESSAGE_TYPE_RECVPAYLOAD, MESSAGE_TYPE_RECVSTATUS, MESSAGE_TYPE_CLOSE, } MessageType; /* Reply statuses */ /* The basic request succeeded */ #define MESSAGE_STATUS_SUCCESS 0x0001 /* vmware has a message available for its party */ #define MESSAGE_STATUS_DORECV 0x0002 /* The channel has been closed */ #define MESSAGE_STATUS_CLOSED 0x0004 /* vmware removed the message before the party fetched it */ #define MESSAGE_STATUS_UNSENT 0x0008 /* A checkpoint occurred */ #define MESSAGE_STATUS_CPT 0x0010 /* An underlying device is powering off */ #define MESSAGE_STATUS_POWEROFF 0x0020 /* vmware has detected a timeout on the channel */ #define MESSAGE_STATUS_TIMEOUT 0x0040 /* vmware supports high-bandwidth for sending and receiving the payload */ #define MESSAGE_STATUS_HB 0x0080 /* * This mask defines the status bits that the guest is allowed to set; * we use this to mask out all other bits when receiving the status * from the guest. Otherwise, the guest can manipulate VMX state by * setting status bits that are only supposed to be changed by the * VMX. See bug 45385. */ #define MESSAGE_STATUS_GUEST_MASK MESSAGE_STATUS_SUCCESS /* * Max number of channels. * Unfortunately this has to be public because the monitor part * of the backdoor needs it for its trivial-case optimization. 
[greg] */ #define GUESTMSG_MAX_CHANNEL 8 /* Flags to open a channel. --hpreg */ #define GUESTMSG_FLAG_COOKIE 0x80000000 #define GUESTMSG_FLAG_ALL GUESTMSG_FLAG_COOKIE /* * Maximum size of incoming message. This is to prevent denial of host service * attacks from guest applications. */ #define GUESTMSG_MAX_IN_SIZE (64 * 1024) #endif /* _GUEST_MSG_DEF_H_ */ vmhgfs-only/shared/dbllnklst.h 0000444 0000000 0000000 00000015155 13432725346 015453 0 ustar root root /********************************************************* * Copyright (C) 1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * dbllnklst.h -- * * Double linked lists */ #ifndef _DBLLNKLST_H_ #define _DBLLNKLST_H_ #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_USERLEVEL #include "includeCheck.h" #include "vm_basic_types.h" #if defined(__cplusplus) extern "C" { #endif #define DblLnkLst_OffsetOf(type, field) ((intptr_t)&((type *)0)->field) #define DblLnkLst_Container(addr, type, field) \ ((type *)((char *)(addr) - DblLnkLst_OffsetOf(type, field))) #define DblLnkLst_ForEach(curr, head) \ for (curr = (head)->next; curr != (head); curr = (curr)->next) /* Safe from list element removal within loop body. 
*/ #define DblLnkLst_ForEachSafe(curr, nextElem, head) \ for (curr = (head)->next, nextElem = (curr)->next; \ curr != (head); \ curr = nextElem, nextElem = (curr)->next) typedef struct DblLnkLst_Links { struct DblLnkLst_Links *prev; struct DblLnkLst_Links *next; } DblLnkLst_Links; /* * Functions * * DblLnkLst_LinkFirst, DblLnkLst_LinkLast, and DblLnkLst_Swap are specific * to anchored lists. The rest are for both circular and anchored lists. */ /* *---------------------------------------------------------------------- * * DblLnkLst_Init -- * * Initialize a member of a doubly linked list * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_Init(DblLnkLst_Links *l) // OUT { l->prev = l->next = l; } /* *---------------------------------------------------------------------- * * DblLnkLst_Link -- * * Merge two doubly linked lists into one * * The operation is commutative * The operation is inversible (its inverse is DblLnkLst_Unlink) * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_Link(DblLnkLst_Links *l1, // IN/OUT DblLnkLst_Links *l2) // IN/OUT { DblLnkLst_Links *tmp; (tmp = l1->prev)->next = l2; (l1->prev = l2->prev)->next = l1; l2->prev = tmp ; } /* *---------------------------------------------------------------------- * * DblLnkLst_Unlink -- * * Split one doubly linked list into two * * No check is performed: the caller must ensure that both members * belong to the same doubly linked list * * The operation is commutative * The operation is inversible (its inverse is DblLnkLst_Link) * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_Unlink(DblLnkLst_Links *l1, // IN/OUT DblLnkLst_Links *l2) // IN/OUT { DblLnkLst_Links *tmp; tmp = l1->prev ; (l1->prev = l2->prev)->next = l1; 
(l2->prev = tmp )->next = l2; } /* *---------------------------------------------------------------------- * * DblLnkLst_Unlink1 -- * * Unlink an element from its list. * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_Unlink1(DblLnkLst_Links *l) // IN/OUT { DblLnkLst_Unlink(l, l->next); } /* *---------------------------------------------------------------------------- * * DblLnkLst_IsLinked -- * * Determines whether an element is linked with any other elements. * * Results: * TRUE if link is linked, FALSE otherwise. * * Side effects: * None. * *---------------------------------------------------------------------------- */ static INLINE Bool DblLnkLst_IsLinked(DblLnkLst_Links const *l) // IN { /* * A DblLnkLst_Links is either linked to itself (not linked) or linked to * other elements in a list (linked). */ return l->prev != l; } /* *---------------------------------------------------------------------- * * DblLnkLst_LinkFirst -- * * Insert 'l' at the beginning of the list anchored at 'head' * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_LinkFirst(DblLnkLst_Links *head, // IN/OUT DblLnkLst_Links *l) // IN/OUT { DblLnkLst_Link(head->next, l); } /* *---------------------------------------------------------------------- * * DblLnkLst_LinkLast -- * * Insert 'l' at the end of the list anchored at 'head' * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_LinkLast(DblLnkLst_Links *head, // IN/OUT DblLnkLst_Links *l) // IN/OUT { DblLnkLst_Link(head, l); } /* *---------------------------------------------------------------------- * * DblLnkLst_Swap -- * * Swap all entries between the list anchored at 'head1' and the list * anchored at 'head2'. 
* * The operation is commutative * The operation is inversible (its inverse is itself) * * Result * None * * Side effects: * None * *---------------------------------------------------------------------- */ static INLINE void DblLnkLst_Swap(DblLnkLst_Links *head1, // IN/OUT DblLnkLst_Links *head2) // IN/OUT { DblLnkLst_Links const tmp = *head1; if (DblLnkLst_IsLinked(head2)) { (head1->prev = head2->prev)->next = head1; (head1->next = head2->next)->prev = head1; } else { DblLnkLst_Init(head1); } if (tmp.prev != head1) { (head2->prev = tmp.prev)->next = head2; (head2->next = tmp.next)->prev = head2; } else { DblLnkLst_Init(head2); } } #if defined(__cplusplus) } // extern "C" #endif #endif /* _DBLLNKLST_H_ */ vmhgfs-only/shared/compat_scsi.h 0000444 0000000 0000000 00000003024 13432725347 015757 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SCSI_H__ # define __COMPAT_SCSI_H__ /* The scsi_bufflen() API appeared somewhere in time --hpreg */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) # define scsi_bufflen(cmd) ((cmd)->request_bufflen) # define scsi_sg_count(cmd) ((cmd)->use_sg) # define scsi_sglist(cmd) ((struct scatterlist *)(cmd)->request_buffer) # define scsi_set_resid(cmd, _resid) ((cmd)->resid = _resid) #endif /* * Using scsi_sglist to access the request buffer looks strange * so instead we define this macro. What happened is later kernel * put all SCSI data in sglists, since it simplifies passing buffers */ #define scsi_request_buffer(cmd) scsi_sglist(cmd) #endif /* __COMPAT_SCSI_H__ */ vmhgfs-only/shared/vm_basic_math.h 0000444 0000000 0000000 00000010721 13432725350 016243 0 ustar root root /********************************************************* * Copyright (C) 2008-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * vm_basic_math.h -- * * Standard mathematical macros for VMware source code. 
*/ #ifndef _VM_BASIC_MATH_H_ #define _VM_BASIC_MATH_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_VMKDRIVERS #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_types.h" // For INLINE. #include "vm_basic_asm.h" // For Div64... #if defined __cplusplus extern "C" { #endif static INLINE uint32 RatioOf(uint32 numer1, uint32 numer2, uint32 denom) { uint64 numer = (uint64)numer1 * numer2; /* Calculate "(numer1 * numer2) / denom" avoiding round-off errors. */ #if defined(VMM) || !(defined(__i386__) || defined(__x86_64__)) return numer / denom; #else uint32 ratio; uint32 unused; Div643232(numer, denom, &ratio, &unused); return ratio; #endif } static INLINE uint32 ExponentialAvg(uint32 avg, uint32 value, uint32 gainNumer, uint32 gainDenom) { uint32 term1 = gainNumer * avg; uint32 term2 = (gainDenom - gainNumer) * value; return (term1 + term2) / gainDenom; } /* *----------------------------------------------------------------------------- * * IsZeroOrPowerOfTwo -- * IsZeroOrPowerOfTwo64 -- * * Results: * TRUE iff the value is 0 or a power of two. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE Bool IsZeroOrPowerOfTwo64(uint64 x) { return !(x & (x - 1)); } static INLINE Bool IsZeroOrPowerOfTwo(uint32 x) // IN { return !(x & (x - 1)); } static INLINE uint32 GetPowerOfTwo(uint32 x) { /* Returns next-greatest power-of-two. */ uint32 power2 = 1; while (x > power2) { power2 = power2 << 1; } return power2; } #if !defined(_WIN32) && !defined(_WIN64) /* *----------------------------------------------------------------------------- * * RotateLeft32 -- * * Results: * Value rotated to the left by 'shift' bits. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static INLINE uint32 RotateLeft32(uint32 value, uint8 shift) { return ((value << shift) | (value >> (32 - shift))); } /* *----------------------------------------------------------------------------- * * RotateRight32 -- * * Results: * Value rotated to the right by 'shift' bits. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint32 RotateRight32(uint32 value, uint8 shift) { return ((value >> shift) | (value << (32 - shift))); } /* *----------------------------------------------------------------------------- * * RotateLeft64 -- * * Results: * Value rotated to the left by 'shift' bits. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint64 RotateLeft64(uint64 value, uint8 shift) { return ((value << shift) | (value >> (64 - shift))); } /* *----------------------------------------------------------------------------- * * RotateRight64 -- * * Results: * Value rotated to the right by 'shift' bits. * * Side effects: * None. * *----------------------------------------------------------------------------- */ static INLINE uint64 RotateRight64(uint64 value, uint8 shift) { return ((value >> shift) | (value << (64 - shift))); } #endif // if !defined(_WIN32) && !defined(_WIN64) #if defined __cplusplus } // extern "C" #endif #endif // ifndef _VM_BASIC_MATH_H_ vmhgfs-only/shared/compat_slab.h 0000444 0000000 0000000 00000006653 13432725347 015752 0 ustar root root /********************************************************* * Copyright (C) 2005 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_SLAB_H__ # define __COMPAT_SLAB_H__ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 2, 0) # include <linux/slab.h> #else # include <linux/malloc.h> #endif /* * Before 2.6.20, kmem_cache_t was the accepted way to refer to a kmem_cache * structure. Prior to 2.6.15, this structure was called kmem_cache_s, and * afterwards it was renamed to kmem_cache. Here we keep things simple and use * the accepted typedef until it became deprecated, at which point we switch * over to the kmem_cache name. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) # define compat_kmem_cache struct kmem_cache #else # define compat_kmem_cache kmem_cache_t #endif /* * Up to 2.6.22 kmem_cache_create has 6 arguments - name, size, alignment, flags, * constructor, and destructor. Then for some time kernel was asserting that * destructor is NULL, and since 2.6.23-pre1 kmem_cache_create takes only 5 * arguments - destructor is gone. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 22) || defined(VMW_KMEMCR_HAS_DTOR) #define compat_kmem_cache_create(name, size, align, flags, ctor) \ kmem_cache_create(name, size, align, flags, ctor, NULL) #else #define compat_kmem_cache_create(name, size, align, flags, ctor) \ kmem_cache_create(name, size, align, flags, ctor) #endif /* * Up to 2.6.23 kmem_cache constructor has three arguments - pointer to block to * prepare (aka "this"), from which cache it came, and some unused flags. 
After * 2.6.23 flags were removed, and order of "this" and cache parameters was swapped... * Since 2.6.27-rc2 everything is different again, and ctor has only one argument. * * HAS_3_ARGS has precedence over HAS_2_ARGS if both are defined. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 23) && !defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) # define VMW_KMEMCR_CTOR_HAS_3_ARGS #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 26) && !defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) # define VMW_KMEMCR_CTOR_HAS_2_ARGS #endif #if defined(VMW_KMEMCR_CTOR_HAS_3_ARGS) typedef void compat_kmem_cache_ctor(void *, compat_kmem_cache *, unsigned long); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg, \ compat_kmem_cache *cache, \ unsigned long flags #elif defined(VMW_KMEMCR_CTOR_HAS_2_ARGS) typedef void compat_kmem_cache_ctor(compat_kmem_cache *, void *); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) compat_kmem_cache *cache, \ void *arg #else typedef void compat_kmem_cache_ctor(void *); #define COMPAT_KMEM_CACHE_CTOR_ARGS(arg) void *arg #endif #endif /* __COMPAT_SLAB_H__ */ vmhgfs-only/shared/compat_mm.h 0000444 0000000 0000000 00000003002 13432725347 015423 0 ustar root root /********************************************************* * Copyright (C) 2002 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_MM_H__ # define __COMPAT_MM_H__ #include <linux/mm.h> /* 2.2.x uses 0 instead of some define */ #ifndef NOPAGE_SIGBUS #define NOPAGE_SIGBUS (0) #endif /* 2.2.x does not have HIGHMEM support */ #ifndef GFP_HIGHUSER #define GFP_HIGHUSER (GFP_USER) #endif /* * In 2.4.14, the logic behind the UnlockPage macro was moved to the * unlock_page() function. Later (in 2.5.12), the UnlockPage macro was removed * altogether, and nowadays everyone uses unlock_page(). */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 14) #define compat_unlock_page(page) UnlockPage(page) #else #define compat_unlock_page(page) unlock_page(page) #endif #endif /* __COMPAT_MM_H__ */ vmhgfs-only/shared/driver-config.h 0000444 0000000 0000000 00000004314 13432725347 016214 0 ustar root root /********************************************************* * Copyright (C) 1998 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * Sets the proper defines from the Linux header files * * This file must be included before the inclusion of any kernel header file, * with the exception of linux/autoconf.h and linux/version.h --hpreg */ #ifndef __VMX_CONFIG_H__ #define __VMX_CONFIG_H__ #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMKDRIVERS #include "includeCheck.h" #include "compat_version.h" #include "compat_autoconf.h" /* * We rely on Kernel Module support. Check here. */ #ifndef CONFIG_MODULES # error "No Module support in this kernel. Please configure with CONFIG_MODULES" #endif /* * 2.2 kernels still use __SMP__ (derived from CONFIG_SMP * in the main Makefile), so we do it here. */ #ifdef CONFIG_SMP # define __SMP__ 1 #endif #if defined(CONFIG_MODVERSIONS) && defined(KERNEL_2_1) # if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,60) /* * MODVERSIONS might be already defined when using kernel's Makefiles. */ # ifndef MODVERSIONS # define MODVERSIONS # endif # include <linux/modversions.h> # endif #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 24) /* * Force the uintptr_t definition to come from linux/types.h instead of vm_basic_types.h. */ # include <linux/types.h> # define _STDINT_H 1 #endif #ifndef __KERNEL__ # define __KERNEL__ #endif #endif vmhgfs-only/shared/x86cpuid_asm.h 0000444 0000000 0000000 00000023356 13432725350 015771 0 ustar root root /********************************************************* * Copyright (C) 2003-2017 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * x86cpuid_asm.h * * CPUID-related assembly functions. */ #ifndef _X86CPUID_ASM_H_ #define _X86CPUID_ASM_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMK_MODULE #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #define INCLUDE_ALLOW_VMCORE #include "includeCheck.h" #include "vm_basic_asm.h" #include "x86cpuid.h" /* * x86-64 windows doesn't support inline asm so we have to use these * intrinsic functions defined in the compiler. Not all of these are well * documented. There is an array in the compiler dll (c1.dll) which has * an array of the names of all the intrinsics minus the leading * underscore. Searching around in the ntddk.h file can also be helpful. * * The declarations for the intrinsic functions were taken from the DDK. * Our declarations must match the ddk's otherwise the 64-bit c++ compiler * will complain about second linkage of the intrinsic functions. * We define the intrinsic using the basic types corresponding to the * Windows typedefs. This avoids having to include windows header files * to get to the windows types. 
*/ #ifdef _MSC_VER #ifdef __cplusplus extern "C" { #endif #ifdef VM_X86_64 /* * intrinsic functions only supported by x86-64 windows as of 2k3sp1 */ void __cpuid(int regs[4], int eax); #pragma intrinsic(__cpuid) /* * __cpuidex has been supported since VS2008 */ #if _MSC_VER >= 1500 void __cpuidex(int regs[4], int eax, int ecx); #pragma intrinsic(__cpuidex) #endif /* _MSC_VER >= 1500 */ #endif /* VM_X86_64 */ #ifdef __cplusplus } #endif #endif /* _MSC_VER */ #ifdef __GNUC__ // { /* * Checked against the Intel manual and GCC --hpreg * * Need __volatile__ and "memory" since CPUID has a synchronizing effect. * The CPUID may also change at runtime (APIC flag, etc). * */ /* * %ebx is reserved on i386 PIC. Apple's gcc-5493 (gcc 4.0) compiling * for x86_64 incorrectly errors out saying %ebx is reserved. This is * Apple bug 7304232. */ #if vm_x86_64 ? (defined __APPLE_CC__ && __APPLE_CC__ == 5493) : defined __PIC__ #if vm_x86_64 /* * Note that this generates movq %rbx,%rbx; cpuid; xchgq %rbx,%rbx ... * Unfortunately Apple's assembler does not have .ifnes, and I cannot * figure out how to do that with .if. 
If we ever enable this code * on other 64bit systems, both movq & xchgq should be surrounded by * .ifnes \"%%rbx\", \"%q1\" & .endif */ #define VM_CPUID_BLOCK "movq %%rbx, %q1\n\t" \ "cpuid\n\t" \ "xchgq %%rbx, %q1\n\t" #define VM_EBX_OUT(reg) "=&r"(reg) #else #define VM_CPUID_BLOCK "movl %%ebx, %1\n\t" \ "cpuid\n\t" \ "xchgl %%ebx, %1\n\t" #define VM_EBX_OUT(reg) "=&rm"(reg) #endif #else #define VM_CPUID_BLOCK "cpuid" #define VM_EBX_OUT(reg) "=b"(reg) #endif static INLINE void __GET_CPUID(int eax, // IN CPUIDRegs *regs) // OUT { __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx) : "a" (eax) : "memory" ); } static INLINE void __GET_CPUID2(int eax, // IN int ecx, // IN CPUIDRegs *regs) // OUT { __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (regs->eax), VM_EBX_OUT(regs->ebx), "=c" (regs->ecx), "=d" (regs->edx) : "a" (eax), "c" (ecx) : "memory" ); } static INLINE uint32 __GET_EAX_FROM_CPUID(int eax) // IN { uint32 ebx; __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (eax), VM_EBX_OUT(ebx) : "a" (eax) : "memory", "%ecx", "%edx" ); return eax; } static INLINE uint32 __GET_EBX_FROM_CPUID(int eax) // IN { uint32 ebx; __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (eax), VM_EBX_OUT(ebx) : "a" (eax) : "memory", "%ecx", "%edx" ); return ebx; } static INLINE uint32 __GET_ECX_FROM_CPUID(int eax) // IN { uint32 ecx; uint32 ebx; __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx) : "a" (eax) : "memory", "%edx" ); return ecx; } static INLINE uint32 __GET_EDX_FROM_CPUID(int eax) // IN { uint32 edx; uint32 ebx; __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (eax), VM_EBX_OUT(ebx), "=d" (edx) : "a" (eax) : "memory", "%ecx" ); return edx; } static INLINE uint32 __GET_EAX_FROM_CPUID4(int ecx) // IN { uint32 eax; uint32 ebx; __asm__ __volatile__( VM_CPUID_BLOCK : "=a" (eax), VM_EBX_OUT(ebx), "=c" (ecx) : "a" (4), "c" (ecx) : "memory", "%edx" ); return eax; } #undef VM_CPUID_BLOCK #undef VM_EBX_OUT #elif 
defined(_MSC_VER) // } { static INLINE void __GET_CPUID(int input, CPUIDRegs *regs) { #ifdef VM_X86_64 __cpuid((int *)regs, input); #else __asm push esi __asm push ebx __asm push ecx __asm push edx __asm mov eax, input __asm mov esi, regs __asm _emit 0x0f __asm _emit 0xa2 __asm mov 0x0[esi], eax __asm mov 0x4[esi], ebx __asm mov 0x8[esi], ecx __asm mov 0xC[esi], edx __asm pop edx __asm pop ecx __asm pop ebx __asm pop esi #endif } #ifdef VM_X86_64 #if _MSC_VER >= 1500 /* * __cpuidex has been supported since VS2008 */ static INLINE void __GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs) { __cpuidex((int *)regs, inputEax, inputEcx); } #else // _MSC_VER >= 1500 /* * No inline assembly in Win64. Implemented in bora/lib/misc in * cpuidMasm64.asm. */ extern void __GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs); #endif // _MSC_VER >= 1500 #else // VM_X86_64 static INLINE void __GET_CPUID2(int inputEax, int inputEcx, CPUIDRegs *regs) { __asm push esi __asm push ebx __asm push ecx __asm push edx __asm mov eax, inputEax __asm mov ecx, inputEcx __asm mov esi, regs __asm _emit 0x0f __asm _emit 0xa2 __asm mov 0x0[esi], eax __asm mov 0x4[esi], ebx __asm mov 0x8[esi], ecx __asm mov 0xC[esi], edx __asm pop edx __asm pop ecx __asm pop ebx __asm pop esi } #endif static INLINE uint32 __GET_EAX_FROM_CPUID(int input) { #ifdef VM_X86_64 CPUIDRegs regs; __cpuid((int *)®s, input); return regs.eax; #else uint32 output; //NOT_TESTED(); __asm push ebx __asm push ecx __asm push edx __asm mov eax, input __asm _emit 0x0f __asm _emit 0xa2 __asm mov output, eax __asm pop edx __asm pop ecx __asm pop ebx return output; #endif } static INLINE uint32 __GET_EBX_FROM_CPUID(int input) { #ifdef VM_X86_64 CPUIDRegs regs; __cpuid((int *)®s, input); return regs.ebx; #else uint32 output; //NOT_TESTED(); __asm push ebx __asm push ecx __asm push edx __asm mov eax, input __asm _emit 0x0f __asm _emit 0xa2 __asm mov output, ebx __asm pop edx __asm pop ecx __asm pop ebx return output; #endif } 
static INLINE uint32 __GET_ECX_FROM_CPUID(int input) { #ifdef VM_X86_64 CPUIDRegs regs; __cpuid((int *)®s, input); return regs.ecx; #else uint32 output; //NOT_TESTED(); __asm push ebx __asm push ecx __asm push edx __asm mov eax, input __asm _emit 0x0f __asm _emit 0xa2 __asm mov output, ecx __asm pop edx __asm pop ecx __asm pop ebx return output; #endif } static INLINE uint32 __GET_EDX_FROM_CPUID(int input) { #ifdef VM_X86_64 CPUIDRegs regs; __cpuid((int *)®s, input); return regs.edx; #else uint32 output; //NOT_TESTED(); __asm push ebx __asm push ecx __asm push edx __asm mov eax, input __asm _emit 0x0f __asm _emit 0xa2 __asm mov output, edx __asm pop edx __asm pop ecx __asm pop ebx return output; #endif } #ifdef VM_X86_64 /* * No inline assembly in Win64. Implemented in bora/lib/misc in * cpuidMasm64.asm. */ extern uint32 __GET_EAX_FROM_CPUID4(int inputEcx); #else // VM_X86_64 static INLINE uint32 __GET_EAX_FROM_CPUID4(int inputEcx) { uint32 output; //NOT_TESTED(); __asm push ebx __asm push ecx __asm push edx __asm mov eax, 4 __asm mov ecx, inputEcx __asm _emit 0x0f __asm _emit 0xa2 __asm mov output, eax __asm pop edx __asm pop ecx __asm pop ebx return output; } #endif // VM_X86_64 #else // } #error #endif #define CPUID_FOR_SIDE_EFFECTS() ((void)__GET_EAX_FROM_CPUID(0)) /* The first parameter is used as an rvalue and then as an lvalue. */ #define GET_CPUID(_ax, _bx, _cx, _dx) { \ CPUIDRegs regs; \ __GET_CPUID(_ax, ®s); \ _ax = regs.eax; \ _bx = regs.ebx; \ _cx = regs.ecx; \ _dx = regs.edx; \ } #define GET_CPUID2(_ax, _bx, _cx, _dx) {\ CPUIDRegs regs; \ __GET_CPUID2(_ax, _cx, ®s); \ _ax = regs.eax; \ _bx = regs.ebx; \ _cx = regs.ecx; \ _dx = regs.edx; \ } #endif vmhgfs-only/shared/vmci_call_defs.h 0000444 0000000 0000000 00000023713 13432725350 016406 0 ustar root root /********************************************************* * Copyright (C) 2006-2016,2018 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _VMCI_CALL_DEFS_H_ #define _VMCI_CALL_DEFS_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_VMMON #define INCLUDE_ALLOW_VMCORE #define INCLUDE_ALLOW_VMKMOD #define INCLUDE_ALLOW_VMKERNEL #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "vmci_defs.h" #if defined __cplusplus extern "C" { #endif /* * All structs here are an integral size of their largest member, ie. a struct * with at least one 8-byte member will have a size that is an integral of 8. * A struct which has a largest member of size 4 will have a size that is an * integral of 4. This is because Windows CL enforces this rule. 32 bit gcc * doesn't e.g. 32 bit gcc can misalign an 8 byte member if it is preceeded by * a 4 byte member. */ /* * Base struct for vmci datagrams. */ typedef struct VMCIDatagram { VMCIHandle dst; VMCIHandle src; uint64 payloadSize; } VMCIDatagram; /* * Second flag is for creating a well-known handle instead of a per context * handle. Next flag is for deferring datagram delivery, so that the * datagram callback is invoked in a delayed context (not interrupt context). 
*/ #define VMCI_FLAG_DG_NONE 0 #define VMCI_FLAG_WELLKNOWN_DG_HND 0x1 #define VMCI_FLAG_ANYCID_DG_HND 0x2 #define VMCI_FLAG_DG_DELAYED_CB 0x4 /* Event callback should fire in a delayed context (not interrupt context.) */ #define VMCI_FLAG_EVENT_NONE 0 #define VMCI_FLAG_EVENT_DELAYED_CB 0x1 /* * Maximum supported size of a VMCI datagram for routable datagrams. * Datagrams going to the hypervisor are allowed to be larger. */ #define VMCI_MAX_DG_SIZE (17 * 4096) #define VMCI_MAX_DG_PAYLOAD_SIZE (VMCI_MAX_DG_SIZE - sizeof(VMCIDatagram)) #define VMCI_DG_PAYLOAD(_dg) (void *)((char *)(_dg) + sizeof(VMCIDatagram)) #define VMCI_DG_HEADERSIZE sizeof(VMCIDatagram) #define VMCI_DG_SIZE(_dg) (VMCI_DG_HEADERSIZE + (size_t)(_dg)->payloadSize) #define VMCI_DG_SIZE_ALIGNED(_dg) ((VMCI_DG_SIZE(_dg) + 7) & (size_t)~7) #define VMCI_MAX_DATAGRAM_QUEUE_SIZE (VMCI_MAX_DG_SIZE * 2) /* * We allow at least 1024 more event datagrams from the hypervisor past the * normally allowed datagrams pending for a given context. We define this * limit on event datagrams from the hypervisor to guard against DoS attack * from a malicious VM which could repeatedly attach to and detach from a queue * pair, causing events to be queued at the destination VM. However, the rate * at which such events can be generated is small since it requires a VM exit * and handling of queue pair attach/detach call at the hypervisor. Event * datagrams may be queued up at the destination VM if it has interrupts * disabled or if it is not draining events for some other reason. 1024 * datagrams is a grossly conservative estimate of the time for which * interrupts may be disabled in the destination VM, but at the same time does * not exacerbate the memory pressure problem on the host by much (size of each * event datagram is small). 
*/ #define VMCI_MAX_DATAGRAM_AND_EVENT_QUEUE_SIZE \ (VMCI_MAX_DATAGRAM_QUEUE_SIZE + \ 1024 * (sizeof(VMCIDatagram) + sizeof(VMCIEventData_Max))) /* * Struct for sending VMCI_DATAGRAM_REQUEST_MAP and * VMCI_DATAGRAM_REMOVE_MAP datagrams. Struct size is 32 bytes. All * fields in struct are aligned to their natural alignment. These * datagrams are obsoleted by the removal of VM to VM communication. */ typedef struct VMCIDatagramWellKnownMapMsg { VMCIDatagram hdr; VMCIId wellKnownID; uint32 _pad; } VMCIDatagramWellKnownMapMsg; /* * Struct used for querying, via VMCI_RESOURCES_QUERY, the availability of * hypervisor resources. * Struct size is 16 bytes. All fields in struct are aligned to their natural * alignment. */ typedef struct VMCIResourcesQueryHdr { VMCIDatagram hdr; uint32 numResources; uint32 _padding; } VMCIResourcesQueryHdr; /* * Convenience struct for negotiating vectors. Must match layout of * VMCIResourceQueryHdr minus the VMCIDatagram header. */ typedef struct VMCIResourcesQueryMsg { uint32 numResources; uint32 _padding; VMCI_Resource resources[1]; } VMCIResourcesQueryMsg; /* * The maximum number of resources that can be queried using * VMCI_RESOURCE_QUERY is 31, as the result is encoded in the lower 31 * bits of a positive return value. Negative values are reserved for * errors. */ #define VMCI_RESOURCE_QUERY_MAX_NUM 31 /* Maximum size for the VMCI_RESOURCE_QUERY request. */ #define VMCI_RESOURCE_QUERY_MAX_SIZE sizeof(VMCIResourcesQueryHdr) \ + VMCI_RESOURCE_QUERY_MAX_NUM * sizeof(VMCI_Resource) /* * Struct used for setting the notification bitmap. All fields in * struct are aligned to their natural alignment. */ typedef struct VMCINotifyBitmapSetMsg { VMCIDatagram hdr; PPN32 bitmapPPN; uint32 _pad; } VMCINotifyBitmapSetMsg; /* * Struct used for linking a doorbell handle with an index in the * notify bitmap. All fields in struct are aligned to their natural * alignment. 
*/ typedef struct VMCIDoorbellLinkMsg { VMCIDatagram hdr; VMCIHandle handle; uint64 notifyIdx; } VMCIDoorbellLinkMsg; /* * Struct used for unlinking a doorbell handle from an index in the * notify bitmap. All fields in struct are aligned to their natural * alignment. */ typedef struct VMCIDoorbellUnlinkMsg { VMCIDatagram hdr; VMCIHandle handle; } VMCIDoorbellUnlinkMsg; /* * Struct used for generating a notification on a doorbell handle. All * fields in struct are aligned to their natural alignment. */ typedef struct VMCIDoorbellNotifyMsg { VMCIDatagram hdr; VMCIHandle handle; } VMCIDoorbellNotifyMsg; /* * This struct is used to contain data for events. Size of this struct is a * multiple of 8 bytes, and all fields are aligned to their natural alignment. */ typedef struct VMCI_EventData { VMCI_Event event; /* 4 bytes. */ uint32 _pad; /* * Event payload is put here. */ } VMCI_EventData; /* Callback needed for correctly waiting on events. */ typedef int (*VMCIDatagramRecvCB)(void *clientData, // IN: client data for handler VMCIDatagram *msg); // IN: /* * We use the following inline function to access the payload data associated * with an event data. */ static INLINE void * VMCIEventDataPayload(VMCI_EventData *evData) // IN: { return (void *)((char *)evData + sizeof *evData); } /* * Define the different VMCI_EVENT payload data types here. All structs must * be a multiple of 8 bytes, and fields must be aligned to their natural * alignment. */ typedef struct VMCIEventPayload_Context { VMCIId contextID; /* 4 bytes. */ uint32 _pad; } VMCIEventPayload_Context; typedef struct VMCIEventPayload_QP { VMCIHandle handle; /* QueuePair handle. */ VMCIId peerId; /* Context id of attaching/detaching VM. */ uint32 _pad; } VMCIEventPayload_QP; /* * We define the following struct to get the size of the maximum event data * the hypervisor may send to the guest. If adding a new event payload type * above, add it to the following struct too (inside the union). 
*/ typedef struct VMCIEventData_Max { VMCI_EventData eventData; union { VMCIEventPayload_Context contextPayload; VMCIEventPayload_QP qpPayload; } evDataPayload; } VMCIEventData_Max; /* * Struct used for VMCI_EVENT_SUBSCRIBE/UNSUBSCRIBE and VMCI_EVENT_HANDLER * messages. Struct size is 32 bytes. All fields in struct are aligned to * their natural alignment. */ typedef struct VMCIEventMsg { VMCIDatagram hdr; VMCI_EventData eventData; /* Has event type and payload. */ /* * Payload gets put here. */ } VMCIEventMsg; /* * We use the following inline function to access the payload data associated * with an event message. */ static INLINE void * VMCIEventMsgPayload(VMCIEventMsg *eMsg) // IN: { return VMCIEventDataPayload(&eMsg->eventData); } /* Flags for VMCI QueuePair API. */ #define VMCI_QPFLAG_ATTACH_ONLY 0x1 /* Fail alloc if QP not created by peer. */ #define VMCI_QPFLAG_LOCAL 0x2 /* Only allow attaches from local context. */ #define VMCI_QPFLAG_NONBLOCK 0x4 /* Host won't block when guest is quiesced. */ /* For asymmetric queuepairs, update as new flags are added. */ #define VMCI_QP_ASYMM VMCI_QPFLAG_NONBLOCK #define VMCI_QP_ASYMM_PEER (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QP_ASYMM) /* Update the following (bitwise OR flags) while adding new flags. */ #define VMCI_QP_ALL_FLAGS (VMCI_QPFLAG_ATTACH_ONLY | VMCI_QPFLAG_LOCAL | \ VMCI_QPFLAG_NONBLOCK) /* * Structs used for QueuePair alloc and detach messages. We align fields of * these structs to 64bit boundaries. */ typedef struct VMCIQueuePairAllocMsg { VMCIDatagram hdr; VMCIHandle handle; VMCIId peer; /* 32bit field. */ uint32 flags; uint64 produceSize; uint64 consumeSize; uint64 numPPNs; /* List of PPNs placed here. 
*/ } VMCIQueuePairAllocMsg; typedef struct VMCIQueuePairDetachMsg { VMCIDatagram hdr; VMCIHandle handle; } VMCIQueuePairDetachMsg; #if defined __cplusplus } // extern "C" #endif #endif // _VMCI_CALL_DEFS_H_ vmhgfs-only/shared/compat_interrupt.h 0000444 0000000 0000000 00000003573 13432725347 017063 0 ustar root root /********************************************************* * Copyright (C) 2003 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef __COMPAT_INTERRUPT_H__ # define __COMPAT_INTERRUPT_H__ #include <linux/interrupt.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69) /* * We cannot just define irqreturn_t, as some 2.4.x kernels have * typedef void irqreturn_t; for "increasing" backward compatibility. 
*/ typedef void compat_irqreturn_t; #define COMPAT_IRQ_NONE #define COMPAT_IRQ_HANDLED #define COMPAT_IRQ_RETVAL(x) #else typedef irqreturn_t compat_irqreturn_t; #define COMPAT_IRQ_NONE IRQ_NONE #define COMPAT_IRQ_HANDLED IRQ_HANDLED #define COMPAT_IRQ_RETVAL(x) IRQ_RETVAL(x) #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) #define COMPAT_IRQF_DISABLED SA_INTERRUPT #define COMPAT_IRQF_SHARED SA_SHIRQ #else #define COMPAT_IRQF_DISABLED IRQF_DISABLED #define COMPAT_IRQF_SHARED IRQF_SHARED #endif #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) #define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp, struct pt_regs *regs) #else #define COMPAT_IRQ_HANDLER_ARGS(irq, devp) (int irq, void *devp) #endif #endif /* __COMPAT_INTERRUPT_H__ */ vmhgfs-only/hgfsUtil.c 0000444 0000000 0000000 00000016634 13432725346 013777 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsUtil.c -- * * Utility routines used by both HGFS servers and clients, such as * conversion routines between Unix time and Windows NT time. * The former is in units of seconds since midnight 1/1/1970, while the * latter is in units of 100 nanoseconds since midnight 1/1/1601. 
 */

/*
 * hgfsUtil.h must be included before vm_basic_asm.h, as hgfsUtil.h
 * includes kernel headers on Linux. That is, vmware.h must come after
 * hgfsUtil.h.
 */
#include "hgfsUtil.h"
#include "vmware.h"
#include "vm_basic_asm.h"

#ifndef _WIN32

/*
 * NT time of the Unix epoch:
 * midnight January 1, 1970 UTC
 *
 * 369 years elapsed between the NT epoch (1601) and 1970, 89 of which
 * were leap years; NT time is counted in 100-nanosecond ticks
 * (10^7 per second).
 */
#define UNIX_EPOCH ((((uint64)369 * 365) + 89) * 24 * 3600 * 10000000)

/*
 * NT time of the Unix 32 bit signed time_t wraparound:
 * 03:14:07 January 19, 2038 UTC
 */
#define UNIX_S32_MAX (UNIX_EPOCH + (uint64)0x80000000 * 10000000)


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsConvertToNtTime --
 *
 *    Convert from Unix time (seconds + nanoseconds since 1/1/1970 UTC)
 *    to Windows NT time (100-nanosecond ticks since 1/1/1601 UTC).
 *
 * Results:
 *    The time in Windows NT format.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

uint64
HgfsConvertToNtTime(time_t unixTime, // IN: Time in Unix format (seconds)
                    long nsec)       // IN: nanoseconds
{
   /* Scale seconds to 100ns ticks; nsec is truncated to 100ns granularity. */
   return (uint64)unixTime * 10000000 + nsec / 100 + UNIX_EPOCH;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsConvertFromNtTimeNsec --
 *
 *    Convert from Windows NT time to Unix time. If NT time is outside of
 *    UNIX time range (1970-2038), returned time is nearest time valid in
 *    UNIX.
 *
 * Results:
 *    0 on success
 *    non-zero if NT time is outside of valid range for UNIX
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsConvertFromNtTimeNsec(struct timespec *unixTime, // OUT: Time in UNIX format
                          uint64 ntTime)             // IN: Time in Windows NT format
{
#ifdef __i386__
   uint32 sec;
   uint32 nsec;

   ASSERT(unixTime);
   /* We assume that time_t is 32bit */
   ASSERT_ON_COMPILE(sizeof (unixTime->tv_sec) == 4);

   /* Cap NT time values that are outside of Unix time's range */
   if (ntTime >= UNIX_S32_MAX) {
      unixTime->tv_sec = 0x7FFFFFFF;
      unixTime->tv_nsec = 0;
      return 1;
   }
#else
   ASSERT(unixTime);
#endif

   /* Times before the Unix epoch clamp to time zero. */
   if (ntTime < UNIX_EPOCH) {
      unixTime->tv_sec = 0;
      unixTime->tv_nsec = 0;
      return -1;
   }

#ifdef __i386__
   /* 64/32-bit division helper: avoids 64-bit division on i386. */
   Div643232(ntTime - UNIX_EPOCH, 10000000, &sec, &nsec);
   unixTime->tv_sec = sec;
   unixTime->tv_nsec = nsec * 100;
#else
   /* 10^7 NT ticks per second; the remainder is in 100ns units. */
   unixTime->tv_sec = (ntTime - UNIX_EPOCH) / 10000000;
   unixTime->tv_nsec = ((ntTime - UNIX_EPOCH) % 10000000) * 100;
#endif

   return 0;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsConvertFromNtTime --
 *
 *    Convert from Windows NT time to Unix time (whole seconds only; the
 *    sub-second part of the conversion is discarded).
 *
 * Results:
 *    0 on success
 *    nonzero if time is not representable on UNIX
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsConvertFromNtTime(time_t *unixTime, // OUT: Time in UNIX format
                      uint64 ntTime)    // IN: Time in Windows NT format
{
   struct timespec tm;
   int ret;

   ret = HgfsConvertFromNtTimeNsec(&tm, ntTime);
   *unixTime = tm.tv_sec;
   return ret;
}
#endif /* !def(_WIN32) */

#undef UNIX_EPOCH
#undef UNIX_S32_MAX


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsConvertFromInternalStatus --
 *
 *    This function converts between a platform-specific status code and a
 *    cross-platform status code to be sent down the wire.
 *
 * Results:
 *    Converted status code.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

#ifdef _WIN32

HgfsStatus
HgfsConvertFromInternalStatus(HgfsInternalStatus status) // IN
{
   /* Map Win32 error codes onto wire-protocol HGFS status values. */
   switch(status) {
   case ERROR_SUCCESS:
      return HGFS_STATUS_SUCCESS;
   case ERROR_FILE_NOT_FOUND:
   case ERROR_PATH_NOT_FOUND:
      return HGFS_STATUS_NO_SUCH_FILE_OR_DIR;
   case ERROR_INVALID_HANDLE:
      return HGFS_STATUS_INVALID_HANDLE;
   case ERROR_ALREADY_EXISTS:
   case ERROR_FILE_EXISTS:
      return HGFS_STATUS_FILE_EXISTS;
   case ERROR_DIR_NOT_EMPTY:
      return HGFS_STATUS_DIR_NOT_EMPTY;
   case RPC_S_PROTOCOL_ERROR:
      return HGFS_STATUS_PROTOCOL_ERROR;
   case ERROR_ACCESS_DENIED:
      return HGFS_STATUS_ACCESS_DENIED;
   case ERROR_INVALID_NAME:
      return HGFS_STATUS_INVALID_NAME;
   case ERROR_SHARING_VIOLATION:
      return HGFS_STATUS_SHARING_VIOLATION;
   case ERROR_DISK_FULL:
   case ERROR_HANDLE_DISK_FULL:
      return HGFS_STATUS_NO_SPACE;
   case ERROR_NOT_SUPPORTED:
      return HGFS_STATUS_OPERATION_NOT_SUPPORTED;
   case ERROR_INVALID_PARAMETER:
      return HGFS_STATUS_INVALID_PARAMETER;
   case ERROR_NOT_SAME_DEVICE:
      return HGFS_STATUS_NOT_SAME_DEVICE;
   case ERROR_FILENAME_EXCED_RANGE:
      return HGFS_STATUS_NAME_TOO_LONG;
   case ERROR_CONNECTION_INVALID:  // HGFS_ERROR_STALE_SESSION
      return HGFS_STATUS_STALE_SESSION;
   case ERROR_MAX_SESSIONS_REACHED:
      return HGFS_STATUS_TOO_MANY_SESSIONS;
   case ERROR_INTERNAL_ERROR:
   case HGFS_INTERNAL_STATUS_ERROR:
   default:
      return HGFS_STATUS_GENERIC_ERROR;
   }
}

#else /* Win32 */

HgfsStatus
HgfsConvertFromInternalStatus(HgfsInternalStatus status) // IN
{
   /* Map POSIX errno values onto wire-protocol HGFS status values. */
   switch(status) {
   case 0:
      return HGFS_STATUS_SUCCESS;
   case ENOENT:
      return HGFS_STATUS_NO_SUCH_FILE_OR_DIR;
   case EBADF:
      return HGFS_STATUS_INVALID_HANDLE;
   case EPERM:
      return HGFS_STATUS_OPERATION_NOT_PERMITTED;
   case EISDIR:
   case EEXIST:
      return HGFS_STATUS_FILE_EXISTS;
   case ENOTDIR:
      return HGFS_STATUS_NOT_DIRECTORY;
   case ENOTEMPTY:
      return HGFS_STATUS_DIR_NOT_EMPTY;
   case EPROTO:
      return HGFS_STATUS_PROTOCOL_ERROR;
   case EACCES:
      return HGFS_STATUS_ACCESS_DENIED;
   case EINVAL:
      return
HGFS_STATUS_INVALID_NAME; case ENOSPC: return HGFS_STATUS_NO_SPACE; case EOPNOTSUPP: return HGFS_STATUS_OPERATION_NOT_SUPPORTED; case ENAMETOOLONG: return HGFS_STATUS_NAME_TOO_LONG; case EPARAMETERNOTSUPPORTED: return HGFS_STATUS_INVALID_PARAMETER; case EXDEV: return HGFS_STATUS_NOT_SAME_DEVICE; case ENETRESET: // HGFS_ERROR_STALE_SESSION return HGFS_STATUS_STALE_SESSION; case ECONNREFUSED: return HGFS_STATUS_TOO_MANY_SESSIONS; case EINTERNAL: case HGFS_INTERNAL_STATUS_ERROR: default: return HGFS_STATUS_GENERIC_ERROR; } } #endif vmhgfs-only/inode.c 0000444 0000000 0000000 00000221222 13432725306 013273 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * inode.c -- * * Inode operations for the filesystem portion of the vmhgfs driver. */ /* Must come before any kernel header file. 
*/ #include "driver-config.h" #include <linux/errno.h> #include <linux/pagemap.h> #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) #include <linux/namei.h> #endif #include <linux/highmem.h> #include <linux/time.h> // for current_fs_time #include "compat_cred.h" #include "compat_dcache.h" #include "compat_fs.h" #include "compat_kernel.h" #include "compat_mm.h" #include "compat_page-flags.h" #include "compat_spinlock.h" #include "compat_version.h" #include "cpName.h" #include "cpNameLite.h" #include "hgfsProto.h" #include "hgfsUtil.h" #include "inode.h" #include "module.h" #include "request.h" #include "fsutil.h" #include "vm_assert.h" #if defined VMW_DCOUNT_311 || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0) /* * Linux Kernel versions that are version 3.11 version and newer or are compatible * by having the d_count function replacement backported. */ #define hgfs_d_count(dentry) d_count(dentry) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38) /* * Kernel versions that are not 3.11 version compatible or are just older will * use the d_count field. */ #define hgfs_d_count(dentry) dentry->d_count #else #define hgfs_d_count(dentry) atomic_read(&dentry->d_count) #endif #if defined VMW_DALIAS_319 || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) /* * Linux Kernel versions that are version 3.19 and newer or are compatible * by having the d_alias field moved into a union backported. */ #define hgfs_d_alias() d_u.d_alias #else /* * Kernel versions that are not 3.19 version compatible or are just older will * use the d_alias field directly. */ #define hgfs_d_alias() d_alias #endif /* Private functions. 
*/ static int HgfsDelete(struct inode *dir, struct dentry *dentry, HgfsOp op); static int HgfsPackSetattrRequest(struct iattr *iattr, struct dentry *dentry, Bool allowHandleReuse, HgfsOp opUsed, HgfsReq *req, Bool *changed); static int HgfsPackCreateDirRequest(struct dentry *dentry, compat_umode_t mode, HgfsOp opUsed, HgfsReq *req); static int HgfsTruncatePages(struct inode *inode, loff_t newSize); static int HgfsPackSymlinkCreateRequest(struct dentry *dentry, const char *symname, HgfsOp opUsed, HgfsReq *req); /* HGFS inode operations. */ static int HgfsCreate(struct inode *dir, struct dentry *dentry, compat_umode_t mode, #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) bool excl #else struct nameidata *nd #endif ); static struct dentry *HgfsLookup(struct inode *dir, struct dentry *dentry, #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0) unsigned int flags #else struct nameidata *nd #endif ); static int HgfsMkdir(struct inode *dir, struct dentry *dentry, compat_umode_t mode); static int HgfsRmdir(struct inode *dir, struct dentry *dentry); static int HgfsUnlink(struct inode *dir, struct dentry *dentry); static int HgfsRename(struct inode *oldDir, struct dentry *oldDentry, struct inode *newDir, struct dentry *newDentry); static int HgfsSymlink(struct inode *dir, struct dentry *dentry, const char *symname); #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) static int HgfsPermission(struct inode *inode, int mask, struct nameidata *nameidata); #elif defined(IPERM_FLAG_RCU) static int HgfsPermission(struct inode *inode, int mask, unsigned int flags); #else static int HgfsPermission(struct inode *inode, int mask); #endif static int HgfsGetattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat); #define HGFS_CREATE_DIR_MASK (HGFS_CREATE_DIR_VALID_FILE_NAME | \ HGFS_CREATE_DIR_VALID_SPECIAL_PERMS | \ HGFS_CREATE_DIR_VALID_OWNER_PERMS | \ HGFS_CREATE_DIR_VALID_GROUP_PERMS | \ HGFS_CREATE_DIR_VALID_OTHER_PERMS) /* HGFS inode operations structure for 
directories. */
struct inode_operations HgfsDirInodeOperations = {
   /* Optional */
   .create     = HgfsCreate,
   /* Optional */
   .mkdir      = HgfsMkdir,
   .lookup     = HgfsLookup,
   .rmdir      = HgfsRmdir,
   .unlink     = HgfsUnlink,
   .rename     = HgfsRename,
   .symlink    = HgfsSymlink,
   .permission = HgfsPermission,
   .setattr    = HgfsSetattr,
   /* Optional */
   .getattr    = HgfsGetattr,
};

/* HGFS inode operations structure for files. */
struct inode_operations HgfsFileInodeOperations = {
   .permission = HgfsPermission,
   .setattr    = HgfsSetattr,
   /* Optional */
   .getattr    = HgfsGetattr,
};

/*
 * Private functions implementations.
 */


/*
 *----------------------------------------------------------------------
 *
 * HgfsClearReadOnly --
 *
 *    Try to remove the file/dir read only attribute.
 *
 *    Note when running on Windows servers the entry may have the read-only
 *    flag set and prevent a rename or delete operation from occuring.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsClearReadOnly(struct dentry *dentry)  // IN: file/dir to remove read only
{
   struct iattr enableWrite;

   LOG(4, (KERN_DEBUG "VMware hgfs: HgfsClearReadOnly: removing read-only\n"));
   /* Turn on the owner-write bit and push only the mode change to the server. */
   enableWrite.ia_mode = (dentry->d_inode->i_mode | S_IWUSR);
   enableWrite.ia_valid = ATTR_MODE;
   return HgfsSetattr(dentry, &enableWrite);
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsDelete --
 *
 *    Handle both unlink and rmdir requests.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsDelete(struct inode *dir, // IN: Parent dir of file/dir to delete struct dentry *dentry, // IN: Dentry of file/dir to delete HgfsOp op) // IN: Opcode for file type (file or dir) { HgfsReq *req = NULL; int result = 0; Bool secondAttempt = FALSE; HgfsStatus replyStatus; char *fileName = NULL; uint32 *fileNameLength; uint32 reqSize; HgfsOp opUsed; ASSERT(dir); ASSERT(dir->i_sb); ASSERT(dentry); ASSERT(dentry->d_inode); if (!dir || !dentry) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: NULL input\n")); result = -EFAULT; goto out; } if ((op != HGFS_OP_DELETE_FILE) && (op != HGFS_OP_DELETE_DIR)) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: Invalid opcode\n")); result = -EINVAL; goto out; } req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: if (op == HGFS_OP_DELETE_FILE) { opUsed = hgfsVersionDeleteFile; } else { opUsed = hgfsVersionDeleteDir; } if (opUsed == HGFS_OP_DELETE_FILE_V3 || opUsed == HGFS_OP_DELETE_DIR_V3) { HgfsRequestDeleteV3 *request; HgfsRequest *header; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestDeleteV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->hints = 0; fileName = request->fileName.name; fileNameLength = &request->fileName.length; request->fileName.fid = HGFS_INVALID_HANDLE; request->fileName.flags = 0; request->fileName.caseType = HGFS_FILE_NAME_DEFAULT_CASE; request->reserved = 0; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestDelete *request; request = (HgfsRequestDelete *)(HGFS_REQ_PAYLOAD(req)); /* Fill out the request packet. 
*/ request->header.id = req->id; request->header.op = opUsed; fileName = request->fileName.name; fileNameLength = &request->fileName.length; reqSize = sizeof *request; } /* Build full name to send to server. */ if (HgfsBuildPath(fileName, HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize), dentry) < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: build path failed\n")); result = -EINVAL; goto out; } LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: deleting \"%s\", opUsed %u\n", fileName, opUsed)); /* Convert to CP name. */ result = CPName_ConvertTo(fileName, HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize), fileName); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: CP conversion failed\n")); result = -EINVAL; goto out; } *fileNameLength = result; req->payloadSize = reqSize + result; result = HgfsSendRequest(req); if (result == 0) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsDelete: got reply\n")); replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: /* * Since we deleted the file, decrement its hard link count. As * we don't support hard links, this has the effect of making the * link count 0, which means that when the last reference to the * inode is dropped, the inode will be freed instead of moved to * the unused list. * * Also update the mtime/ctime of the parent directory, and the * ctime of the deleted file. */ compat_drop_nlink(dentry->d_inode); dentry->d_inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; break; case -EACCES: case -EPERM: /* * It's possible that we're talking to a Windows server with * a file marked read-only. Let's try again, after removing * the read-only bit from the file. * * XXX: I think old servers will send -EPERM here. Is this entirely * safe? 
*/ if (!secondAttempt) { secondAttempt = TRUE; result = HgfsClearReadOnly(dentry); if (result == 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: file is no " "longer read-only, retrying delete\n")); goto retry; } LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: failed to remove " "read-only property\n")); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: second attempt at " "delete failed\n")); } break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_DELETE_DIR_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionDeleteDir = HGFS_OP_DELETE_DIR; goto retry; } else if (opUsed == HGFS_OP_DELETE_FILE_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: Version 3 not " "supported. Falling back to version 1.\n")); hgfsVersionDeleteFile = HGFS_OP_DELETE_FILE; goto retry; } LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: server " "returned error: %d\n", result)); break; default: break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsDelete: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; } /* *---------------------------------------------------------------------- * * HgfsPackSetattrRequest -- * * Setup the Setattr request, depending on the op version. When possible, * we will issue the setattr request using an existing open HGFS handle. * * Results: * Returns zero on success, or negative error on failure. * * On success, the changed argument is set indicating whether the * attributes have actually changed. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPackSetattrRequest(struct iattr *iattr, // IN: Inode attrs to update from struct dentry *dentry, // IN: File to set attributes of Bool allowHandleReuse, // IN: Can we use a handle? HgfsOp opUsed, // IN: Op to be used HgfsReq *req, // IN/OUT: Packet to write into Bool *changed) // OUT: Have the attrs changed? { HgfsAttrV2 *attrV2; HgfsAttr *attr; HgfsAttrHint *hints; HgfsAttrChanges *update; HgfsHandle handle; char *fileName = NULL; uint32 *fileNameLength = NULL; unsigned int valid; size_t reqBufferSize; size_t reqSize; int result = 0; uid_t attrUid = -1; gid_t attrGid = -1; ASSERT(iattr); ASSERT(dentry); ASSERT(req); ASSERT(changed); valid = iattr->ia_valid; if (valid & ATTR_UID) { attrUid = from_kuid(&init_user_ns, iattr->ia_uid); } if (valid & ATTR_GID) { attrGid = from_kgid(&init_user_ns, iattr->ia_gid); } switch (opUsed) { case HGFS_OP_SETATTR_V3: { HgfsRequest *requestHeader; HgfsRequestSetattrV3 *requestV3; requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); requestHeader->op = opUsed; requestHeader->id = req->id; requestV3 = (HgfsRequestSetattrV3 *)HGFS_REQ_PAYLOAD_V3(req); attrV2 = &requestV3->attr; hints = &requestV3->hints; /* * Clear attributes, mask, and hints before touching them. * We can't rely on GetNewRequest() to zero our structures, so * make sure to zero them all here. */ memset(attrV2, 0, sizeof *attrV2); memset(hints, 0, sizeof *hints); /* * When possible, issue a setattr using an existing handle. This will * give us slightly better performance on a Windows server, and is more * correct regardless. If we don't find a handle, fall back on setattr * by name. * * Changing the size (via truncate) requires write permissions. Changing * the times also requires write permissions on Windows, so we require it * here too. Otherwise, any handle will do. 
*/ if (allowHandleReuse && HgfsGetHandle(dentry->d_inode, (valid & ATTR_SIZE) || (valid & ATTR_ATIME) || (valid & ATTR_MTIME) ? HGFS_OPEN_MODE_WRITE_ONLY + 1 : 0, &handle) == 0) { requestV3->fileName.fid = handle; requestV3->fileName.flags = HGFS_FILE_NAME_USE_FILE_DESC; requestV3->fileName.caseType = HGFS_FILE_NAME_DEFAULT_CASE; requestV3->fileName.length = 0; LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: setting " "attributes of handle %u\n", handle)); } else { fileName = requestV3->fileName.name; fileNameLength = &requestV3->fileName.length; requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; requestV3->fileName.fid = HGFS_INVALID_HANDLE; requestV3->fileName.flags = 0; } requestV3->reserved = 0; reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); reqBufferSize = HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize); /* * We only support changing these attributes: * - all mode bits (i.e. all permissions) * - uid/gid * - size * - access/write times */ if (valid & ATTR_MODE) { attrV2->mask |= HGFS_ATTR_VALID_SPECIAL_PERMS | HGFS_ATTR_VALID_OWNER_PERMS | HGFS_ATTR_VALID_GROUP_PERMS | HGFS_ATTR_VALID_OTHER_PERMS; attrV2->specialPerms = ((iattr->ia_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9); attrV2->ownerPerms = ((iattr->ia_mode & S_IRWXU) >> 6); attrV2->groupPerms = ((iattr->ia_mode & S_IRWXG) >> 3); attrV2->otherPerms = (iattr->ia_mode & S_IRWXO); *changed = TRUE; } if (valid & ATTR_UID) { attrV2->mask |= HGFS_ATTR_VALID_USERID; attrV2->userId = attrUid; *changed = TRUE; } if (valid & ATTR_GID) { attrV2->mask |= HGFS_ATTR_VALID_GROUPID; attrV2->groupId = attrGid; *changed = TRUE; } if (valid & ATTR_SIZE) { attrV2->mask |= HGFS_ATTR_VALID_SIZE; attrV2->size = iattr->ia_size; *changed = TRUE; } if (valid & ATTR_ATIME) { attrV2->mask |= HGFS_ATTR_VALID_ACCESS_TIME; attrV2->accessTime = HGFS_GET_TIME(iattr->ia_atime); if (valid & ATTR_ATIME_SET) { *hints |= HGFS_ATTR_HINT_SET_ACCESS_TIME; } *changed = TRUE; } if (valid & ATTR_MTIME) { attrV2->mask |= 
HGFS_ATTR_VALID_WRITE_TIME; attrV2->writeTime = HGFS_GET_TIME(iattr->ia_mtime); if (valid & ATTR_MTIME_SET) { *hints |= HGFS_ATTR_HINT_SET_WRITE_TIME; } *changed = TRUE; } break; } case HGFS_OP_SETATTR_V2: { HgfsRequestSetattrV2 *requestV2; requestV2 = (HgfsRequestSetattrV2 *)(HGFS_REQ_PAYLOAD(req)); requestV2->header.op = opUsed; requestV2->header.id = req->id; attrV2 = &requestV2->attr; hints = &requestV2->hints; /* * Clear attributes, mask, and hints before touching them. * We can't rely on GetNewRequest() to zero our structures, so * make sure to zero them all here. */ memset(attrV2, 0, sizeof *attrV2); memset(hints, 0, sizeof *hints); /* * When possible, issue a setattr using an existing handle. This will * give us slightly better performance on a Windows server, and is more * correct regardless. If we don't find a handle, fall back on setattr * by name. * * Changing the size (via truncate) requires write permissions. Changing * the times also requires write permissions on Windows, so we require it * here too. Otherwise, any handle will do. */ if (allowHandleReuse && HgfsGetHandle(dentry->d_inode, (valid & ATTR_SIZE) || (valid & ATTR_ATIME) || (valid & ATTR_MTIME) ? HGFS_OPEN_MODE_WRITE_ONLY + 1 : 0, &handle) == 0) { *hints = HGFS_ATTR_HINT_USE_FILE_DESC; requestV2->file = handle; LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: setting " "attributes of handle %u\n", handle)); } else { fileName = requestV2->fileName.name; fileNameLength = &requestV2->fileName.length; } reqSize = sizeof *requestV2; reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV2); /* * We only support changing these attributes: * - all mode bits (i.e. 
all permissions) * - uid/gid * - size * - access/write times */ if (valid & ATTR_MODE) { attrV2->mask |= HGFS_ATTR_VALID_SPECIAL_PERMS | HGFS_ATTR_VALID_OWNER_PERMS | HGFS_ATTR_VALID_GROUP_PERMS | HGFS_ATTR_VALID_OTHER_PERMS; attrV2->specialPerms = ((iattr->ia_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9); attrV2->ownerPerms = ((iattr->ia_mode & S_IRWXU) >> 6); attrV2->groupPerms = ((iattr->ia_mode & S_IRWXG) >> 3); attrV2->otherPerms = (iattr->ia_mode & S_IRWXO); *changed = TRUE; } if (valid & ATTR_UID) { attrV2->mask |= HGFS_ATTR_VALID_USERID; attrV2->userId = attrUid; *changed = TRUE; } if (valid & ATTR_GID) { attrV2->mask |= HGFS_ATTR_VALID_GROUPID; attrV2->groupId = attrGid; *changed = TRUE; } if (valid & ATTR_SIZE) { attrV2->mask |= HGFS_ATTR_VALID_SIZE; attrV2->size = iattr->ia_size; *changed = TRUE; } if (valid & ATTR_ATIME) { attrV2->mask |= HGFS_ATTR_VALID_ACCESS_TIME; attrV2->accessTime = HGFS_GET_TIME(iattr->ia_atime); if (valid & ATTR_ATIME_SET) { *hints |= HGFS_ATTR_HINT_SET_ACCESS_TIME; } *changed = TRUE; } if (valid & ATTR_MTIME) { attrV2->mask |= HGFS_ATTR_VALID_WRITE_TIME; attrV2->writeTime = HGFS_GET_TIME(iattr->ia_mtime); if (valid & ATTR_MTIME_SET) { *hints |= HGFS_ATTR_HINT_SET_WRITE_TIME; } *changed = TRUE; } break; } case HGFS_OP_SETATTR: { HgfsRequestSetattr *request; request = (HgfsRequestSetattr *)(HGFS_REQ_PAYLOAD(req)); request->header.op = opUsed; request->header.id = req->id; attr = &request->attr; update = &request->update; /* We'll use these later. */ fileName = request->fileName.name; fileNameLength = &request->fileName.length; reqSize = sizeof *request; reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, request); /* * Clear attributes before touching them. * We can't rely on GetNewRequest() to zero our structures, so * make sure to zero them all here. */ memset(attr, 0, sizeof *attr); memset(update, 0, sizeof *update); /* * We only support changing these attributes: * - owner mode bits (i.e. 
owner permissions) * - size * - access/write times */ if (valid & ATTR_MODE) { *update |= HGFS_ATTR_PERMISSIONS; attr->permissions = ((iattr->ia_mode & S_IRWXU) >> 6); *changed = TRUE; } if (valid & ATTR_SIZE) { *update |= HGFS_ATTR_SIZE; attr->size = iattr->ia_size; *changed = TRUE; } if (valid & ATTR_ATIME) { *update |= HGFS_ATTR_ACCESS_TIME | ((valid & ATTR_ATIME_SET) ? HGFS_ATTR_ACCESS_TIME_SET : 0); attr->accessTime = HGFS_GET_TIME(iattr->ia_atime); *changed = TRUE; } if (valid & ATTR_MTIME) { *update |= HGFS_ATTR_WRITE_TIME | ((valid & ATTR_MTIME_SET) ? HGFS_ATTR_WRITE_TIME_SET : 0); attr->writeTime = HGFS_GET_TIME(iattr->ia_mtime); *changed = TRUE; } break; } default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: unexpected " "OP type encountered\n")); return -EPROTO; } /* Avoid all this extra work when we're doing a setattr by handle. */ if (fileName != NULL) { /* Build full name to send to server. */ if (HgfsBuildPath(fileName, reqBufferSize, dentry) < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: build path " "failed\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: setting " "attributes of \"%s\"\n", fileName)); /* Convert to CP name. */ result = CPName_ConvertTo(fileName, reqBufferSize, fileName); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSetattrRequest: CP " "conversion failed\n")); return -EINVAL; } *fileNameLength = result; } req->payloadSize = reqSize + result; return 0; } /* *---------------------------------------------------------------------- * * HgfsPackCreateDirRequest -- * * Setup the CreateDir request, depending on the op version. * * Results: * Returns zero on success, or negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPackCreateDirRequest(struct dentry *dentry, // IN: Directory to create compat_umode_t mode, // IN: Mode to assign dir HgfsOp opUsed, // IN: Op to be used. 
HgfsReq *req) // IN/OUT: Packet to write into { char *fileName = NULL; uint32 *fileNameLength; size_t requestSize; int result; ASSERT(dentry); ASSERT(req); switch (opUsed) { case HGFS_OP_CREATE_DIR_V3: { HgfsRequest *requestHeader; HgfsRequestCreateDirV3 *requestV3; requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); requestHeader->op = opUsed; requestHeader->id = req->id; requestV3 = (HgfsRequestCreateDirV3 *)(HGFS_REQ_PAYLOAD_V3(req)); /* We'll use these later. */ fileName = requestV3->fileName.name; fileNameLength = &requestV3->fileName.length; requestV3->fileName.flags = 0; requestV3->fileName.fid = HGFS_INVALID_HANDLE; requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); requestV3->mask = HGFS_CREATE_DIR_MASK; /* Set permissions. */ requestV3->specialPerms = (mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9; requestV3->ownerPerms = (mode & S_IRWXU) >> 6; requestV3->groupPerms = (mode & S_IRWXG) >> 3; requestV3->otherPerms = (mode & S_IRWXO); requestV3->fileAttr = 0; break; } case HGFS_OP_CREATE_DIR_V2: { HgfsRequestCreateDirV2 *requestV2; requestV2 = (HgfsRequestCreateDirV2 *)(HGFS_REQ_PAYLOAD(req)); requestV2->header.op = opUsed; requestV2->header.id = req->id; /* We'll use these later. */ fileName = requestV2->fileName.name; fileNameLength = &requestV2->fileName.length; requestSize = sizeof *requestV2; requestV2->mask = HGFS_CREATE_DIR_MASK; /* Set permissions. */ requestV2->specialPerms = (mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9; requestV2->ownerPerms = (mode & S_IRWXU) >> 6; requestV2->groupPerms = (mode & S_IRWXG) >> 3; requestV2->otherPerms = (mode & S_IRWXO); break; } case HGFS_OP_CREATE_DIR: { HgfsRequestCreateDir *request; request = (HgfsRequestCreateDir *)(HGFS_REQ_PAYLOAD(req)); /* We'll use these later. */ fileName = request->fileName.name; fileNameLength = &request->fileName.length; requestSize = sizeof *request; requestSize = sizeof *request; /* Set permissions. 
*/ request->permissions = (mode & S_IRWXU) >> 6; break; } default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackCreateDirRequest: unexpected " "OP type encountered\n")); return -EPROTO; } /* Build full name to send to server. */ if (HgfsBuildPath(fileName, req->bufferSize - (requestSize - 1), dentry) < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackCreateDirRequest: build path " "failed\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackCreateDirRequest: create dir " "\"%s\", perms %o\n", fileName, mode)); /* Convert to CP name. */ result = CPName_ConvertTo(fileName, req->bufferSize - (requestSize - 1), fileName); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackCreateDirRequest: CP " "conversion failed\n")); return -EINVAL; } *fileNameLength = result; req->payloadSize = requestSize + result; return 0; } /* *---------------------------------------------------------------------- * * HgfsTruncatePages -- * * Following a truncate operation on the server, we must update the * page cache's view of the file by truncating some pages. This is a * two step procedure. First we call vmtruncate() to truncate all * whole pages. Then we get the boundary page from the page cache * ourselves, compute where the truncation began, and memset() the * rest of the page to zero. * * Results: * Returns zero on success, or negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsTruncatePages(struct inode *inode, // IN: Inode whose page to truncate loff_t newSize) // IN: New size of the file { int result; pgoff_t pageIndex = newSize >> PAGE_CACHE_SHIFT; unsigned pageOffset = newSize & (PAGE_CACHE_SIZE - 1); struct page *page; char *buffer; ASSERT(inode); LOG(4, (KERN_DEBUG "VMware hgfs: HgfsTruncatePages: entered\n")); /* * In 3.8.0, vmtruncate was removed and replaced by calling the check * size and set directly. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0) result = vmtruncate(inode, newSize); #else result = inode_newsize_ok(inode, newSize); if (0 == result) { truncate_setsize(inode, newSize); } #endif if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsTruncatePages: vmtruncate failed " "with error code %d\n", result)); return result; } /* * This is a bit complicated, so it merits an explanation. grab_cache_page() * will give us back the page with the specified index, after having locked * and incremented its reference count. We must first map it into memory so * we can modify it. After we're done modifying the page, we flush its data * from the data cache, unmap it, release our reference, and unlock it. */ page = grab_cache_page(inode->i_mapping, pageIndex); if (page == NULL) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsTruncatePages: could not get page " "with index %lu from page cache\n", pageIndex)); return -ENOMEM; } buffer = kmap(page); memset(buffer + pageOffset, 0, PAGE_CACHE_SIZE - pageOffset); flush_dcache_page(page); kunmap(page); page_cache_release(page); compat_unlock_page(page); return 0; } /* * HGFS inode operations. */ /* *---------------------------------------------------------------------- * * HgfsCreate -- * * Create inode for a new file. Called directly by vfs_create, * which is called by open_namei (both in fs/namei.c), as a result * of someone doing a creat(2) or an open(2) with O_CREAT. * * This gets called BEFORE f_op->open is called, so the file on the * remote end has not been created yet when we get here. So, we * just cheat and create a reasonable looking inode and instantiate * it. When this returns, our open routine will get called, which * will create the actual file on the server. If that fails for * some reason, dentry_open (which calls f_op->open) will cleanup * things and fput the dentry. * * XXX: Now that we do care about having valid inode numbers, it is * unfortunate but necessary that we "cheat" here. 
The problem is that
 *    without the "intent" field from the nameidata struct (which we don't
 *    get prior to 2.5.75), we have no way of knowing whether the file was
 *    opened with O_EXCL or O_TRUNC. Knowing about O_TRUNC isn't crucial
 *    because we can always create the file now and truncate it later, in
 *    HgfsOpen. But without knowing about O_EXCL, we can't "fail if the file
 *    exists on the server", which is the desired behavior for O_EXCL. The
 *    source code for NFSv3 in 2.4.2 describes this shortcoming. The only
 *    solution, barring massive architectural differences between the 2.4 and
 *    2.6 HGFS drivers, is to ignore O_EXCL, but we've supported it up until
 *    now...
 *
 * Results:
 *    Returns zero on success, negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsCreate(struct inode *dir,     // IN: Parent dir to create in
           struct dentry *dentry, // IN: Dentry containing name to create
           compat_umode_t mode,   // IN: Mode of file to be created
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
           bool excl              // IN: O_EXCL
#else
           struct nameidata *nd   // IN: Intent, vfsmount, ...
#endif
          )
{
   HgfsAttrInfo attr;
   int result;

   ASSERT(dir);
   ASSERT(dentry);

   /*
    * We can call HgfsBuildPath and make the full path to this new entry,
    * but why bother if it's only for logging.
    */
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsCreate: new entry \"%s\"\n",
           dentry->d_name.name));

   /*
    * Create appropriate attrs for this file: a regular, zero-size file
    * whose permission bits are decomposed from the requested mode.
    */
   attr.type = HGFS_FILE_TYPE_REGULAR;
   attr.size = 0; /* just to be explicit */
   attr.specialPerms = ((mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9);
   attr.ownerPerms = (mode & S_IRWXU) >> 6;
   attr.groupPerms = (mode & S_IRWXG) >> 3;
   attr.otherPerms = mode & S_IRWXO;
   attr.mask = HGFS_ATTR_VALID_TYPE | HGFS_ATTR_VALID_SIZE |
      HGFS_ATTR_VALID_SPECIAL_PERMS | HGFS_ATTR_VALID_OWNER_PERMS |
      HGFS_ATTR_VALID_GROUP_PERMS | HGFS_ATTR_VALID_OTHER_PERMS;

   result = HgfsInstantiate(dentry, 0, &attr);

   /*
    * Mark the inode as recently created but not yet opened so that if we do
    * fail to create the actual file in HgfsOpen, we know to force a
    * revalidate so that the next operation on this inode will fail.
    */
   if (result == 0) {
      HgfsInodeInfo *iinfo = INODE_GET_II_P(dentry->d_inode);
      iinfo->createdAndUnopened = TRUE;
   }

   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsLookup --
 *
 *    Lookup a file in a directory.
 *
 *    We do a getattr to see if the file exists on the server, and if
 *    so we create a new inode and fill in the fields appropriately by
 *    calling HgfsIget with the results of the getattr, and then
 *    call d_add with the new dentry.
 *
 *    For the curious, the way lookup in linux works (see fs/namei.c)
 *    is roughly as follows: first a d_lookup is done to see if there
 *    is an appropriate entry in the dcache already. If there is, it
 *    is revalidated by calling d_op->d_revalidate, which calls our
 *    HgfsDentryRevalidate (see above). If there is no dentry in the
 *    cache or if the dentry is no longer valid, then namei calls
 *    i_op->lookup, which calls HgfsLookup.
 *
 * Results:
 *    Returns NULL on success, negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static struct dentry *
HgfsLookup(struct inode *dir,     // IN: Inode of parent directory
           struct dentry *dentry, // IN: Dentry containing name to look up
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
           unsigned int flags
#else
           struct nameidata *nd   // IN: Intent, vfsmount, ...
#endif
          )
{
   HgfsAttrInfo attr;
   struct inode *inode;
   int error = 0;

   ASSERT(dir);
   ASSERT(dentry);

   if (!dir || !dentry) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsLookup: NULL input\n"));
      error = -EFAULT;
      goto error;
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsLookup: dir ino %lu, i_dev %u\n",
           dir->i_ino, dir->i_sb->s_dev));
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsLookup: entry name is \"%s\"\n",
           dentry->d_name.name));

   /* Do a getattr on the file to see if it exists on the server. */
   inode = NULL;
   error = HgfsPrivateGetattr(dentry, &attr, NULL);
   if (!error) {
      /* File exists on the server. */

      /*
       * Get the inode with this inode number and the attrs we got from
       * the server.
       */
      inode = HgfsIget(dir->i_sb, 0, &attr);
      if (!inode) {
         error = -ENOMEM;
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsLookup: out of memory getting "
                 "inode\n"));
         goto error;
      }
   } else if (error != -ENOENT) {
      /*
       * Either the file doesn't exist or there was a more serious
       * error; if it's the former, it's okay, we just do nothing.
       * A negative dentry (NULL inode) is still added to the dcache below.
       */
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsLookup: error other "
              "than ENOENT: %d\n", error));
      goto error;
   }

   /*
    * Set the dentry's time to NOW, set its operations pointer, add it
    * and the new (possibly NULL) inode to the dcache.
    */
   HgfsDentryAgeReset(dentry);
   dentry->d_op = &HgfsDentryOperations;
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsLookup: adding new entry\n"));
   d_add(dentry, inode);

   return NULL;

error:
   return ERR_PTR(error);
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsMkdir --
 *
 *    Handle a mkdir request
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
*
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsMkdir(struct inode *dir,     // IN: Inode of parent directory
          struct dentry *dentry, // IN: Dentry with name to be created
          compat_umode_t mode)   // IN: Mode of dir to be created
{
   HgfsReq *req;
   HgfsStatus replyStatus;
   HgfsOp opUsed;
   int result = 0;

   ASSERT(dir);
   ASSERT(dir->i_sb);
   ASSERT(dentry);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

   /*
    * Pack and send the request using the currently negotiated protocol
    * version; on -EPROTO from the server we downgrade the global version
    * and jump back here.
    */
retry:
   opUsed = hgfsVersionCreateDir;
   result = HgfsPackCreateDirRequest(dentry, mode, opUsed, req);
   if (result != 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: error packing request\n"));
      goto out;
   }

   /*
    * Send the request and process the reply. Since HgfsReplyCreateDirV2 and
    * HgfsReplyCreateDir are identical, we need no special logic here.
    */
   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsMkdir: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      switch (result) {
      case 0:
         LOG(6, (KERN_DEBUG "VMware hgfs: HgfsMkdir: directory created "
                 "successfully, instantiating dentry\n"));
         result = HgfsInstantiate(dentry, 0, NULL);
         if (result == 0) {
            /*
             * Attempt to set host directory's uid/gid to that of the
             * current user. As with the open(.., O_CREAT) case, this is
             * only expected to work when the hgfs server is running on
             * a Linux machine and as root, but we might as well give it
             * a go.
             */
            HgfsSetUidGid(dir, dentry, current_fsuid(), current_fsgid());
         }

         /*
          * XXX: When we support hard links, this is a good place to
          * increment link count of parent dir.
          */
         break;
      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_CREATE_DIR_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: Version 3 not "
                    "supported. Falling back to version 2.\n"));
            hgfsVersionCreateDir = HGFS_OP_CREATE_DIR_V2;
            goto retry;
         } else if (opUsed == HGFS_OP_CREATE_DIR_V2) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: Version 2 not "
                    "supported. Falling back to version 1.\n"));
            hgfsVersionCreateDir = HGFS_OP_CREATE_DIR;
            goto retry;
         }

         /* Fallthrough. */
      default:
         LOG(6, (KERN_DEBUG "VMware hgfs: HgfsMkdir: directory was not "
                 "created, error %d\n", result));
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsMkdir: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsRmdir --
 *
 *    Handle an rmdir request. Just calls HgfsDelete with the
 *    correct opcode.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsRmdir(struct inode *dir,      // IN: Parent dir of dir to remove
          struct dentry *dentry)  // IN: Dentry of dir to remove
{
   int result;

   LOG(8, (KERN_DEBUG "VMware hgfs: HgfsRmdir: was called\n"));

   /*
    * XXX: CIFS also sets the size of the deleted directory to 0. Why? I don't
    * know...why not?
    *
    * XXX: When we support hardlinks, we should decrement the link count of
    * the parent directory.
    */
   result = HgfsDelete(dir, dentry, HGFS_OP_DELETE_DIR);
   if (!result) {
      compat_i_size_write(dentry->d_inode, 0);
   }

   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsUnlink --
 *
 *    Handle an unlink request. Just calls HgfsDelete with the
 *    correct opcode.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
*
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsUnlink(struct inode *dir,      // IN: Parent dir of file to unlink
           struct dentry *dentry)  // IN: Dentry of file to unlink
{
   LOG(8, (KERN_DEBUG "VMware hgfs: HgfsUnlink: was called\n"));

   return HgfsDelete(dir, dentry, HGFS_OP_DELETE_FILE);
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsRename --
 *
 *    Handle rename requests.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsRename(struct inode *oldDir,      // IN: Inode of original directory
           struct dentry *oldDentry,  // IN: Dentry of file to rename
           struct inode *newDir,      // IN: Inode of new directory
           struct dentry *newDentry)  // IN: Dentry containing new name
{
   HgfsReq *req = NULL;
   char *oldName;
   char *newName;
   Bool secondAttempt=FALSE;  // TRUE once we've retried after clearing RO bit
   uint32 *oldNameLength;
   uint32 *newNameLength;
   int result = 0;
   uint32 reqSize;
   HgfsOp opUsed;
   HgfsStatus replyStatus;

   ASSERT(oldDir);
   ASSERT(oldDir->i_sb);
   ASSERT(oldDentry);
   ASSERT(newDir);
   ASSERT(newDentry);

   if (!oldDir || !oldDentry || !newDir || !newDentry) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: NULL input\n"));
      result = -EFAULT;
      goto out;
   }

   if (oldDentry->d_inode && newDentry->d_inode) {
      HgfsInodeInfo *oldIinfo;
      HgfsInodeInfo *newIinfo;
      /*
       * Don't do rename if the source and target are identical (from the
       * viewpoint of the host). It is possible that multiple guest inodes
       * point to the same host inode under the case that both one folder
       * and its subfolder are mapped as hgfs sharese. Please also see the
       * comments at fsutil.c/HgfsIget.
       */
      oldIinfo = INODE_GET_II_P(oldDentry->d_inode);
      newIinfo = INODE_GET_II_P(newDentry->d_inode);
      if (oldIinfo->hostFileId !=0 && newIinfo->hostFileId != 0 &&
          oldIinfo->hostFileId == newIinfo->hostFileId) {
         LOG(4, ("VMware hgfs: %s: source and target are the same file.\n",
                 __func__));
         result = -EEXIST;
         goto out;
      }
   }

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

   /* Pack the request headers for the negotiated protocol version. */
retry:
   opUsed = hgfsVersionRename;
   if (opUsed == HGFS_OP_RENAME_V3) {
      HgfsRequestRenameV3 *request = (HgfsRequestRenameV3 *)HGFS_REQ_PAYLOAD_V3(req);
      HgfsRequest *header = (HgfsRequest *)HGFS_REQ_PAYLOAD(req);

      header->op = opUsed;
      header->id = req->id;

      oldName = request->oldName.name;
      oldNameLength = &request->oldName.length;
      request->hints = 0;
      request->oldName.flags = 0;
      request->oldName.fid = HGFS_INVALID_HANDLE;
      request->oldName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      request->reserved = 0;
      reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(request);
   } else {
      HgfsRequestRename *request = (HgfsRequestRename *)HGFS_REQ_PAYLOAD(req);

      request->header.op = opUsed;
      oldName = request->oldName.name;
      oldNameLength = &request->oldName.length;
      reqSize = sizeof *request;
   }

   /* Build full old name to send to server. */
   if (HgfsBuildPath(oldName, HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize),
                     oldDentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: build old path failed\n"));
      result = -EINVAL;
      goto out;
   }
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRename: Old name: \"%s\"\n",
           oldName));

   /* Convert old name to CP format. */
   result = CPName_ConvertTo(oldName,
                             HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize),
                             oldName);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: oldName CP "
              "conversion failed\n"));
      result = -EINVAL;
      goto out;
   }

   *oldNameLength = result;
   reqSize += result;

   /*
    * Build full new name to send to server.
    * Note the different buffer length. This is because HgfsRequestRename
    * contains two filenames, and once we place the first into the packet we
    * must account for it when determining the amount of buffer available for
    * the second.
    */
   if (opUsed == HGFS_OP_RENAME_V3) {
      HgfsRequestRenameV3 *request = (HgfsRequestRenameV3 *)HGFS_REQ_PAYLOAD_V3(req);
      HgfsFileNameV3 *newNameP;
      /* The new name record starts right after the variable-length old name. */
      newNameP = (HgfsFileNameV3 *)((char *)&request->oldName +
                                    sizeof request->oldName + result);
      newName = newNameP->name;
      newNameLength = &newNameP->length;
      newNameP->flags = 0;
      newNameP->fid = HGFS_INVALID_HANDLE;
      newNameP->caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
   } else {
      HgfsRequestRename *request = (HgfsRequestRename *)HGFS_REQ_PAYLOAD(req);
      HgfsFileName *newNameP;
      newNameP = (HgfsFileName *)((char *)&request->oldName +
                                  sizeof request->oldName + result);
      newName = newNameP->name;
      newNameLength = &newNameP->length;
   }

   if (HgfsBuildPath(newName,
                     HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize) - result,
                     newDentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: build new path failed\n"));
      result = -EINVAL;
      goto out;
   }
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRename: New name: \"%s\"\n",
           newName));

   /* Convert new name to CP format. */
   result = CPName_ConvertTo(newName,
                             HGFS_NAME_BUFFER_SIZET(req->bufferSize, reqSize) - result,
                             newName);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: newName CP "
              "conversion failed\n"));
      result = -EINVAL;
      goto out;
   }

   *newNameLength = result;
   reqSize += result;
   req->payloadSize = reqSize;

   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRename: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);
      if (result == -EPROTO) {
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_RENAME_V3) {
            hgfsVersionRename = HGFS_OP_RENAME;
            goto retry;
         } else {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: server "
                    "returned error: %d\n", result));
            goto out;
         }
      } else if ((-EACCES == result) || (-EPERM == result)) {
         /*
          * It's possible that we're talking to a Windows server with
          * a file marked read-only. Let's try again, after removing
          * the read-only bit from the file.
          *
          * XXX: I think old servers will send -EPERM here. Is this entirely
          * safe?
          * We can receive EACCES or EPERM if we don't have the correct
          * permission on the source file. So lets not assume that we have
          * a target and only clear the target if there is one.
          */
         if (!secondAttempt && newDentry->d_inode != NULL) {
            secondAttempt = TRUE;
            LOG(4, (KERN_DEBUG "VMware hgfs: %s:clear target RO mode %8x\n",
                    __func__, newDentry->d_inode->i_mode));
            result = HgfsClearReadOnly(newDentry);
            if (result == 0) {
               LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: file is no "
                       "longer read-only, retrying rename\n"));
               goto retry;
            }
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: failed to remove "
                    "read-only property\n"));
         } else {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: second attempt or "
                    "no target failed\n"));
         }
      } else if (0 != result) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: failed with result %d\n",
                 result));
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRename: unknown error: "
              "%d\n", result));
   }

#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)
   if (result == 0) {
      /*
       * We force revalidate to go get the file info as soon as needed.
       * We only add this fix, borrowed from CIFS, for newer versions
       * of the kernel which have the current_fs_time function.
       * For details see bug 1613734 but here is a short summary.
       * This addresses issues in editors such as gedit which use
       * rename when saving the updated contents of a file.
       * If we don't force the revalidation here, then the dentry
       * will randomly age over some time which will then pick up the
       * file's new timestamps from the server at that time.
       * This delay will cause the editor to think the file has been modified
       * underneath it and prompt the user if they want to reload the file.
       */
      HgfsDentryAgeForce(oldDentry);
      HgfsDentryAgeForce(newDentry);
      oldDir->i_ctime = oldDir->i_mtime = newDir->i_ctime = newDir->i_mtime =
         current_fs_time(oldDir->i_sb);
   }
#endif // LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 32)

out:
   HgfsFreeRequest(req);
   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPackSymlinkCreateRequest --
 *
 *    Setup the create symlink request, depending on the op version.
 *
 * Results:
 *    Returns zero on success, or negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsPackSymlinkCreateRequest(struct dentry *dentry, // IN: File pointer for this open
                             const char *symname,   // IN: Target name
                             HgfsOp opUsed,         // IN: Op to be used
                             HgfsReq *req)          // IN/OUT: Packet to write into
{
   HgfsRequestSymlinkCreateV3 *requestV3 = NULL;
   HgfsRequestSymlinkCreate *request = NULL;
   char *symlinkName;
   uint32 *symlinkNameLength;
   char *targetName;
   uint32 *targetNameLength;
   size_t targetNameBytes;
   size_t requestSize;
   int result;

   ASSERT(dentry);
   ASSERT(symname);
   ASSERT(req);

   switch (opUsed) {
   case HGFS_OP_CREATE_SYMLINK_V3: {
      HgfsRequest *requestHeader;

      requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      requestHeader->op = opUsed;
      requestHeader->id = req->id;

      requestV3 = (HgfsRequestSymlinkCreateV3 *)HGFS_REQ_PAYLOAD_V3(req);

      /* We'll use these later. */
      symlinkName = requestV3->symlinkName.name;
      symlinkNameLength = &requestV3->symlinkName.length;
      requestV3->symlinkName.flags = 0;
      requestV3->symlinkName.fid = HGFS_INVALID_HANDLE;
      requestV3->symlinkName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      requestV3->reserved = 0;
      requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3);
      break;
   }
   case HGFS_OP_CREATE_SYMLINK: {
      request = (HgfsRequestSymlinkCreate *)(HGFS_REQ_PAYLOAD(req));
      request->header.op = opUsed;
      request->header.id = req->id;

      /* We'll use these later. */
      symlinkName = request->symlinkName.name;
      symlinkNameLength = &request->symlinkName.length;
      requestSize = sizeof *request;
      break;
   }
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: unexpected "
              "OP type encountered\n"));
      return -EPROTO;
   }

   if (HgfsBuildPath(symlinkName, req->bufferSize - (requestSize - 1),
                     dentry) < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: build symlink path "
              "failed\n"));
      return -EINVAL;
   }
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: Symlink name: \"%s\"\n",
           symlinkName));

   /* Convert symlink name to CP format. */
   result = CPName_ConvertTo(symlinkName,
                             req->bufferSize - (requestSize - 1),
                             symlinkName);
   if (result < 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: symlinkName CP "
              "conversion failed\n"));
      return -EINVAL;
   }
   *symlinkNameLength = result;
   req->payloadSize = requestSize + result;

   /*
    * Note the different buffer length. This is because HgfsRequestSymlink
    * contains two filenames, and once we place the first into the packet we
    * must account for it when determining the amount of buffer available for
    * the second.
    *
    * Also note that targetNameBytes accounts for the NUL character. Once
    * we've converted it to CP name, it won't be NUL-terminated and the length
    * of the string in the packet itself won't account for it.
    */
   if (opUsed == HGFS_OP_CREATE_SYMLINK_V3) {
      HgfsFileNameV3 *fileNameP;
      /* The target record starts right after the variable-length link name. */
      fileNameP = (HgfsFileNameV3 *)((char *)&requestV3->symlinkName +
                                     sizeof requestV3->symlinkName + result);
      targetName = fileNameP->name;
      targetNameLength = &fileNameP->length;
      fileNameP->flags = 0;
      fileNameP->fid = HGFS_INVALID_HANDLE;
      fileNameP->caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
   } else {
      HgfsFileName *fileNameP;
      fileNameP = (HgfsFileName *)((char *)&request->symlinkName +
                                   sizeof request->symlinkName + result);
      targetName = fileNameP->name;
      targetNameLength = &fileNameP->length;
   }
   targetNameBytes = strlen(symname) + 1;

   /* Copy target name into request packet. */
   if (targetNameBytes > req->bufferSize - (requestSize - 1)) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: target name is too "
              "big\n"));
      return -EINVAL;
   }
   memcpy(targetName, symname, targetNameBytes);
   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackSymlinkCreateRequest: target name: \"%s\"\n",
           targetName));

   /* Convert target name to CPName-lite format. */
   CPNameLite_ConvertTo(targetName, targetNameBytes - 1, '/');
   *targetNameLength = targetNameBytes - 1;
   req->payloadSize += targetNameBytes - 1;

   return 0;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsSymlink --
 *
 *    Handle a symlink request
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
*
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsSymlink(struct inode *dir,     // IN: Inode of parent directory
            struct dentry *dentry, // IN: Dentry of new symlink file
            const char *symname)   // IN: Target name
{
   HgfsReq *req;
   int result = 0;
   HgfsOp opUsed;
   HgfsStatus replyStatus;

   ASSERT(dir);
   ASSERT(dir->i_sb);
   ASSERT(dentry);
   ASSERT(symname);

   req = HgfsGetNewRequest();
   if (!req) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: out of memory while "
              "getting new request\n"));
      result = -ENOMEM;
      goto out;
   }

   /* Pack and send using the negotiated version; downgrade on -EPROTO. */
retry:
   opUsed = hgfsVersionCreateSymlink;
   result = HgfsPackSymlinkCreateRequest(dentry, symname, opUsed, req);
   if (result != 0) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: error packing request\n"));
      goto out;
   }

   result = HgfsSendRequest(req);
   if (result == 0) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsSymlink: got reply\n"));
      replyStatus = HgfsReplyStatus(req);
      result = HgfsStatusConvertToLinux(replyStatus);

      if (result == 0) {
         LOG(6, (KERN_DEBUG "VMware hgfs: HgfsSymlink: symlink created "
                 "successfully, instantiating dentry\n"));
         result = HgfsInstantiate(dentry, 0, NULL);
      } else if (result == -EPROTO) {
         /* Retry with older version(s). Set globally. */
         if (opUsed == HGFS_OP_CREATE_SYMLINK_V3) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: Version 3 "
                    "not supported. Falling back to version 2.\n"));
            hgfsVersionCreateSymlink = HGFS_OP_CREATE_SYMLINK;
            goto retry;
         } else {
            LOG(6, (KERN_DEBUG "VMware hgfs: HgfsSymlink: symlink was not "
                    "created, error %d\n", result));
         }
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: timed out\n"));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: server "
              "returned error: %d\n", result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSymlink: unknown error: "
              "%d\n", result));
   }

out:
   HgfsFreeRequest(req);
   return result;
}


/*
 *----------------------------------------------------------------------------
 *
 * HgfsAccessInt --
 *
 *    Check to ensure the user has the specified type of access to the file.
 *
 * Results:
 *    Returns 0 if access is allowed and a non-zero error code otherwise.
 *
 * Side effects:
 *    None.
 *
 *----------------------------------------------------------------------------
 */

static int
HgfsAccessInt(struct dentry *dentry, // IN: dentry to check access for
              int mask)              // IN: access mode requested.
{
   HgfsAttrInfo attr;
   int ret;

   if (!dentry) {
      return 0;
   }
   ret = HgfsPrivateGetattr(dentry, &attr, NULL);
   if (ret == 0) {
      uint32 effectivePermissions;

      if (attr.mask & HGFS_ATTR_VALID_EFFECTIVE_PERMS) {
         effectivePermissions = attr.effectivePerms;
      } else {
         /*
          * If the server did not return actual effective permissions then
          * need to calculate ourselves. However we should avoid unnecessary
          * denial of access so perform optimistic permissions calculation.
          * It is safe since host enforces necessary restrictions regardless of
          * the client's decisions.
          */
         effectivePermissions =
            attr.ownerPerms | attr.groupPerms | attr.otherPerms;
      }

      if ((effectivePermissions & mask) != mask) {
         ret = -EACCES;
      }
      LOG(8, ("VMware Hgfs: %s: effectivePermissions: %d, ret: %d\n",
              __func__, effectivePermissions, ret));
   } else {
      LOG(4, ("VMware Hgfs: %s: HgfsPrivateGetattr failed.\n", __func__));
   }
   return ret;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPermission --
 *
 *    Check for access rights on Hgfs. Called from VFS layer for each
 *    file access.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)
static int
HgfsPermission(struct inode *inode,
               int mask)
{
   LOG(8, ("VMware hgfs: %s: inode->mode: %8x mask: %8x\n", __func__,
           inode->i_mode, mask));
   /*
    * For sys_access, we go to the host for permission checking;
    * otherwise return 0.
    */
   if (mask & MAY_ACCESS) { /* For sys_access. */
      struct dentry *dentry;
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
      struct hlist_node *p;
#endif

      /* We cannot go to the server in RCU (non-blocking) walk mode. */
      if (mask & MAY_NOT_BLOCK)
         return -ECHILD;

      /* Find a dentry with valid d_count. Refer bug 587879. */
      hlist_for_each_entry(dentry,
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
                           p,
#endif
                           &inode->i_dentry,
                           hgfs_d_alias()) {
         int dcount = hgfs_d_count(dentry);
         if (dcount) {
            LOG(4, ("Found %s %d \n", dentry->d_name.name, dcount));
            return HgfsAccessInt(dentry,
                                 mask & (MAY_READ | MAY_WRITE | MAY_EXEC));
         }
      }
      ASSERT(FALSE);
   }
   return 0;
}
#else
static int
HgfsPermission(struct inode *inode,
               int mask
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
               , struct nameidata *nd
#elif defined(IPERM_FLAG_RCU)
               , unsigned int flags
#endif
               )
{
   LOG(8, ("VMware hgfs: %s: inode->mode: %8x mask: %8x\n", __func__,
           inode->i_mode, mask));
   /*
    * For sys_access, we go to the host for permission checking;
    * otherwise return 0.
    */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27)
   if (nd != NULL && (nd->flags & LOOKUP_ACCESS)) { /* For sys_access. */
#else
   if (mask & MAY_ACCESS) { /* For sys_access. */
#endif
      struct list_head *pos;

      /*
       * In 2.6.38 path walk is done in 2 distinct modes: rcu-walk and
       * ref-walk. Ref-walk is the classic one; rcu is lockless and is
       * not allowed to sleep. We insist on using ref-walk since our
       * transports may sleep. In 3.1 IPERM_FLAG_RCU was replaced with
       * MAY_NOT_BLOCK.
       */
#if defined(MAY_NOT_BLOCK)
      if (mask & MAY_NOT_BLOCK)
         return -ECHILD;
#elif defined(IPERM_FLAG_RCU)
      if (flags & IPERM_FLAG_RCU)
         return -ECHILD;
#endif

      /* Find a dentry with valid d_count. Refer bug 587879. */
      list_for_each(pos, &inode->i_dentry) {
         int dcount;
         struct dentry *dentry = list_entry(pos, struct dentry,
                                            hgfs_d_alias());
         dcount = hgfs_d_count(dentry);
         if (dcount) {
            LOG(4, ("Found %s %d \n", (dentry)->d_name.name, dcount));
            return HgfsAccessInt(dentry,
                                 mask & (MAY_READ | MAY_WRITE | MAY_EXEC));
         }
      }
      ASSERT(FALSE);
   }
   return 0;
}
#endif


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsGetattr --
 *
 *    Hgfs superblock 'getattr' method.
 *
 * Results:
 *    0 on success
 *    error < 0 on failure
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsGetattr(struct vfsmount *mnt,  // Unused
            struct dentry *dentry, // IN
            struct kstat *stat)    // OUT
{
   int err;

   // XXX ASSERT(mnt); ? --hpreg
   ASSERT(dentry);
   ASSERT(stat);

   /* Refresh the inode's attributes from the server before reporting them. */
   err = HgfsRevalidate(dentry);
   if (err) {
      return err;
   }

   /* Convert stats from the VFS inode format to the kernel format --hpreg */
   generic_fillattr(dentry->d_inode, stat);
   // XXX Should we set stat->blocks and stat->blksize? --hpreg

   return 0;
}


/*
 * Public function implementations.
 */

/*
 *----------------------------------------------------------------------
 *
 * HgfsSetattr --
 *
 *    Handle a setattr request.
Call HgfsSetattrCopy to determine * which fields need updating and convert them to the HgfsAttr * format, then send the request to the server. * * Results: * Returns zero on success, or a negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsSetattr(struct dentry *dentry, // IN: File to set attributes of struct iattr *iattr) // IN: Attributes to set { HgfsReq *req; HgfsStatus replyStatus; int result = 0; Bool changed = FALSE; Bool allowHandleReuse = TRUE; HgfsOp opUsed; ASSERT(dentry); ASSERT(dentry->d_inode); ASSERT(dentry->d_sb); ASSERT(iattr); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: /* Fill out the request packet. */ opUsed = hgfsVersionSetattr; result = HgfsPackSetattrRequest(iattr, dentry, allowHandleReuse, opUsed, req, &changed); if (result != 0 || !changed) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: no attrs changed\n")); goto out; } /* * Flush all dirty pages prior to sending the request if we're going to * modify the file size or change the last write time. */ if (iattr->ia_valid & ATTR_SIZE || iattr->ia_valid & ATTR_MTIME) { ASSERT(dentry->d_inode->i_mapping); compat_filemap_write_and_wait(dentry->d_inode->i_mapping); } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: /* * If we modified the file size, we must truncate our pages from the * page cache. */ if (iattr->ia_valid & ATTR_SIZE) { result = HgfsTruncatePages(dentry->d_inode, iattr->ia_size); } /* Fallthrough to revalidate. */ case -EPERM: /* * Now that the server's attributes are updated, let's update our * local view of them. 
Unfortunately, we can't trust iattr, because * the server may have chosen to ignore certain attributes that we * asked it to set. For example, a Windows server will have ignored * the mode nearly entirely. Therefore, rather than calling * inode_setattr() to update the inode with the contents of iattr, * just force a revalidate. * * XXX: Note that EPERM gets similar treatment, as the server may * have updated some of the attributes and still sent us an error. */ HgfsDentryAgeForce(dentry); HgfsRevalidate(dentry); break; case -EBADF: /* * This can happen if we attempted a setattr by handle and the handle * was closed. Because we have no control over the backdoor, it's * possible that an attacker closed our handle, in which case the * driver still thinks the handle is open. So a straight-up * "goto retry" would cause an infinite loop. Instead, let's retry * with a setattr by name. */ if (allowHandleReuse) { allowHandleReuse = FALSE; goto retry; } /* * There's no reason why the server should have sent us this error * when we haven't used a handle. But to prevent an infinite loop in * the driver, let's make sure that we don't retry again. */ break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_SETATTR_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: Version 3 " "not supported. Falling back to version 2.\n")); hgfsVersionSetattr = HGFS_OP_SETATTR_V2; goto retry; } else if (opUsed == HGFS_OP_SETATTR_V2) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: Version 2 " "not supported. Falling back to version 1.\n")); hgfsVersionSetattr = HGFS_OP_SETATTR; goto retry; } /* Fallthrough. 
*/ default: break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetattr: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; } /* *---------------------------------------------------------------------- * * HgfsRevalidate -- * * Called when the kernel wants to check that an inode is still * valid. Called with the dentry that points to the inode we're * interested in. * * We call HgfsPrivateGetattr with the inode's remote name, and if * it succeeds we update the inode's attributes and return zero * (success). Otherwise, we return an error. * * Results: * Returns zero if inode is valid, negative error if not. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsRevalidate(struct dentry *dentry) // IN: Dentry to revalidate { int error = 0; HgfsSuperInfo *si; unsigned long age; HgfsInodeInfo *iinfo; ASSERT(dentry); si = HGFS_SB_TO_COMMON(dentry->d_sb); if (!dentry->d_inode) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRevalidate: null input\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRevalidate: name %s, " "inum %lu\n", dentry->d_name.name, dentry->d_inode->i_ino)); age = jiffies - dentry->d_time; iinfo = INODE_GET_II_P(dentry->d_inode); if (age > si->ttl || iinfo->hostFileId == 0) { HgfsAttrInfo attr; LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRevalidate: dentry is too old, " "getting new attributes\n")); /* * Sync unwritten file data so the file size on the server will * be current with our view of the file. */ compat_filemap_write_and_wait(dentry->d_inode->i_mapping); error = HgfsPrivateGetattr(dentry, &attr, NULL); if (!error) { /* * If server provides file ID, we need to check whether it has changed * since last revalidation. 
There might be a case that at server side * the same file name has been used for other file during the period. */ if (attr.mask & HGFS_ATTR_VALID_FILEID) { if (iinfo->hostFileId == 0) { /* hostFileId was invalidated, so update it here */ iinfo->hostFileId = attr.hostFileId; } else if (iinfo->hostFileId != attr.hostFileId) { LOG(4, ("VMware hgfs: %s: host file id mismatch. Expected " "%"FMT64"u, got %"FMT64"u.\n", __func__, iinfo->hostFileId, attr.hostFileId)); return -EINVAL; } } /* Update inode's attributes and reset the age. */ HgfsChangeFileAttributes(dentry->d_inode, &attr); HgfsDentryAgeReset(dentry); } } else { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsRevalidate: using cached dentry " "attributes\n")); } return error; } vmhgfs-only/cpNameLite.c 0000444 0000000 0000000 00000005127 13432725346 014226 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpNameLite.c -- * * Shared portions of cross-platform name conversion routines used * by hgfs. Unlike the real CP name conversion routines, these ones * just convert path separators to nul characters and vice versa. 
* */ #include "cpNameLite.h" #include "vm_assert.h" /* *---------------------------------------------------------------------- * * CPNameLite_ConvertTo -- * * Makes a cross-platform lite name representation from the input * string. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void CPNameLite_ConvertTo(char *bufIn, // IN/OUT: Input to convert size_t inSize, // IN: Size of input buffer char pathSep) // IN: Path separator { size_t pos; ASSERT(bufIn); for (pos = 0; pos < inSize; pos++) { if (bufIn[pos] == pathSep) { bufIn[pos] = '\0'; } } } /* *---------------------------------------------------------------------- * * CPNameLite_ConvertFrom -- * * Converts a cross-platform lite name representation into a string for * use in the local filesystem. This is a cross-platform * implementation and takes the path separator as an * argument. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void CPNameLite_ConvertFrom(char *bufIn, // IN/OUT: Input to convert size_t inSize, // IN: Size of input buffer char pathSep) // IN: Path separator { size_t pos; ASSERT(bufIn); for (pos = 0; pos < inSize; pos++) { if (bufIn[pos] == '\0') { bufIn[pos] = pathSep; } } } vmhgfs-only/cpNameLinux.c 0000444 0000000 0000000 00000011643 13432725346 014430 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpNameLinux.c -- * * Linux implementation of cross-platform name conversion * routines used by hgfs. [bac] * */ #if defined sun && !defined SOL9 #include <memory.h> #endif #include "cpName.h" #include "cpNameInt.h" #include "vm_assert.h" #include "hgfsEscape.h" /* *---------------------------------------------------------------------- * * CPName_ConvertFrom -- * * Converts a cross-platform name representation into a string for * use in the local filesystem. * * Results: * Length (not including NUL termination) >= 0 of resulting * string on success. * Negative error on failure (the converted string did not fit in * the buffer provided or the input was invalid). * * Side effects: * None * *---------------------------------------------------------------------- */ int CPName_ConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut) // IN/OUT: Output buffer { ASSERT(bufIn); ASSERT(inSize); ASSERT(outSize); ASSERT(bufOut); return CPNameEscapeAndConvertFrom(bufIn, inSize, outSize, bufOut, '/'); } /* *---------------------------------------------------------------------- * * CPName_ConvertFromRoot -- * * Append the appropriate prefix to the output buffer for accessing * the root of the local filesystem. CPName_ConvertFrom prepends * leading path separators before each path component, but only * when the next component has nonzero length, so we still need to * special case this for Linux. * * The pointers and sizes are updated appropriately. 
* * Results: * Status of name conversion * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsNameStatus CPName_ConvertFromRoot(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut) // IN/OUT: Output buffer { char const *next; char *out; int len; ASSERT(bufIn); ASSERT(inSize); ASSERT(outSize); ASSERT(bufOut); out = *bufOut; /* * Get first component */ len = CPName_GetComponent(*bufIn, *bufIn + *inSize, &next); if (len < 0) { Log("%s: get first component failed\n", __FUNCTION__); return HGFS_NAME_STATUS_FAILURE; } /* Space for leading '/' plus NUL termination */ if (*outSize < len + 2) { return HGFS_NAME_STATUS_FAILURE; } /* Put a leading '/' in the output buffer either way */ *out++ = '/'; memcpy(out, *bufIn, len); out += len; /* NUL terminate */ *out = '\0'; *inSize -= next - *bufIn; *outSize -= out - *bufOut; *bufIn = next; *bufOut = out; return HGFS_NAME_STATUS_COMPLETE; } /* *---------------------------------------------------------------------------- * * CPName_ConvertTo -- * * Wrapper function that calls the Linux implementation of _ConvertTo(). * * Makes a cross-platform name representation from the Linux path input * string and writes it into the output buffer. * If the name being converter may be a result a of name escaping the * function performs unescaping. HGFS convention is to always exchange * unescaped names between guest and host and to perform necessary * name escaping on both ends. * * Results: * On success, returns the number of bytes used in the * cross-platform name, NOT including the final terminating NUL * character. On failure, returns a negative error. * * Side effects: * None. 
* *---------------------------------------------------------------------------- */ int CPName_ConvertTo(char const *nameIn, // IN: Buf to convert size_t bufOutSize, // IN: Size of the output buffer char *bufOut) // OUT: Output buffer { int result; result = CPName_LinuxConvertTo(nameIn, bufOutSize, bufOut); return result; } vmhgfs-only/COPYING 0000444 0000000 0000000 00000043103 13432725341 013063 0 ustar root root GNU GENERAL PUBLIC LICENSE Version 2, June 1991 Copyright (C) 1989, 1991 Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA Everyone is permitted to copy and distribute verbatim copies of this license document, but changing it is not allowed. Preamble The licenses for most software are designed to take away your freedom to share and change it. By contrast, the GNU General Public License is intended to guarantee your freedom to share and change free software--to make sure the software is free for all its users. This General Public License applies to most of the Free Software Foundation's software and to any other program whose authors commit to using it. (Some other Free Software Foundation software is covered by the GNU Lesser General Public License instead.) You can apply it to your programs, too. When we speak of free software, we are referring to freedom, not price. Our General Public Licenses are designed to make sure that you have the freedom to distribute copies of free software (and charge for this service if you wish), that you receive source code or can get it if you want it, that you can change the software or use pieces of it in new free programs; and that you know you can do these things. To protect your rights, we need to make restrictions that forbid anyone to deny you these rights or to ask you to surrender the rights. These restrictions translate to certain responsibilities for you if you distribute copies of the software, or if you modify it. 
For example, if you distribute copies of such a program, whether gratis or for a fee, you must give the recipients all the rights that you have. You must make sure that they, too, receive or can get the source code. And you must show them these terms so they know their rights. We protect your rights with two steps: (1) copyright the software, and (2) offer you this license which gives you legal permission to copy, distribute and/or modify the software. Also, for each author's protection and ours, we want to make certain that everyone understands that there is no warranty for this free software. If the software is modified by someone else and passed on, we want its recipients to know that what they have is not the original, so that any problems introduced by others will not reflect on the original authors' reputations. Finally, any free program is threatened constantly by software patents. We wish to avoid the danger that redistributors of a free program will individually obtain patent licenses, in effect making the program proprietary. To prevent this, we have made it clear that any patent must be licensed for everyone's free use or not licensed at all. The precise terms and conditions for copying, distribution and modification follow. GNU GENERAL PUBLIC LICENSE TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION 0. This License applies to any program or other work which contains a notice placed by the copyright holder saying it may be distributed under the terms of this General Public License. The "Program", below, refers to any such program or work, and a "work based on the Program" means either the Program or any derivative work under copyright law: that is to say, a work containing the Program or a portion of it, either verbatim or with modifications and/or translated into another language. (Hereinafter, translation is included without limitation in the term "modification".) Each licensee is addressed as "you". 
Activities other than copying, distribution and modification are not covered by this License; they are outside its scope. The act of running the Program is not restricted, and the output from the Program is covered only if its contents constitute a work based on the Program (independent of having been made by running the Program). Whether that is true depends on what the Program does. 1. You may copy and distribute verbatim copies of the Program's source code as you receive it, in any medium, provided that you conspicuously and appropriately publish on each copy an appropriate copyright notice and disclaimer of warranty; keep intact all the notices that refer to this License and to the absence of any warranty; and give any other recipients of the Program a copy of this License along with the Program. You may charge a fee for the physical act of transferring a copy, and you may at your option offer warranty protection in exchange for a fee. 2. You may modify your copy or copies of the Program or any portion of it, thus forming a work based on the Program, and copy and distribute such modifications or work under the terms of Section 1 above, provided that you also meet all of these conditions: a) You must cause the modified files to carry prominent notices stating that you changed the files and the date of any change. b) You must cause any work that you distribute or publish, that in whole or in part contains or is derived from the Program or any part thereof, to be licensed as a whole at no charge to all third parties under the terms of this License. 
c) If the modified program normally reads commands interactively when run, you must cause it, when started running for such interactive use in the most ordinary way, to print or display an announcement including an appropriate copyright notice and a notice that there is no warranty (or else, saying that you provide a warranty) and that users may redistribute the program under these conditions, and telling the user how to view a copy of this License. (Exception: if the Program itself is interactive but does not normally print such an announcement, your work based on the Program is not required to print an announcement.) These requirements apply to the modified work as a whole. If identifiable sections of that work are not derived from the Program, and can be reasonably considered independent and separate works in themselves, then this License, and its terms, do not apply to those sections when you distribute them as separate works. But when you distribute the same sections as part of a whole which is a work based on the Program, the distribution of the whole must be on the terms of this License, whose permissions for other licensees extend to the entire whole, and thus to each and every part regardless of who wrote it. Thus, it is not the intent of this section to claim rights or contest your rights to work written entirely by you; rather, the intent is to exercise the right to control the distribution of derivative or collective works based on the Program. In addition, mere aggregation of another work not based on the Program with the Program (or with a work based on the Program) on a volume of a storage or distribution medium does not bring the other work under the scope of this License. 3. 
You may copy and distribute the Program (or a work based on it, under Section 2) in object code or executable form under the terms of Sections 1 and 2 above provided that you also do one of the following: a) Accompany it with the complete corresponding machine-readable source code, which must be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, b) Accompany it with a written offer, valid for at least three years, to give any third party, for a charge no more than your cost of physically performing source distribution, a complete machine-readable copy of the corresponding source code, to be distributed under the terms of Sections 1 and 2 above on a medium customarily used for software interchange; or, c) Accompany it with the information you received as to the offer to distribute corresponding source code. (This alternative is allowed only for noncommercial distribution and only if you received the program in object code or executable form with such an offer, in accord with Subsection b above.) The source code for a work means the preferred form of the work for making modifications to it. For an executable work, complete source code means all the source code for all modules it contains, plus any associated interface definition files, plus the scripts used to control compilation and installation of the executable. However, as a special exception, the source code distributed need not include anything that is normally distributed (in either source or binary form) with the major components (compiler, kernel, and so on) of the operating system on which the executable runs, unless that component itself accompanies the executable. 
If distribution of executable or object code is made by offering access to copy from a designated place, then offering equivalent access to copy the source code from the same place counts as distribution of the source code, even though third parties are not compelled to copy the source along with the object code. 4. You may not copy, modify, sublicense, or distribute the Program except as expressly provided under this License. Any attempt otherwise to copy, modify, sublicense or distribute the Program is void, and will automatically terminate your rights under this License. However, parties who have received copies, or rights, from you under this License will not have their licenses terminated so long as such parties remain in full compliance. 5. You are not required to accept this License, since you have not signed it. However, nothing else grants you permission to modify or distribute the Program or its derivative works. These actions are prohibited by law if you do not accept this License. Therefore, by modifying or distributing the Program (or any work based on the Program), you indicate your acceptance of this License to do so, and all its terms and conditions for copying, distributing or modifying the Program or works based on it. 6. Each time you redistribute the Program (or any work based on the Program), the recipient automatically receives a license from the original licensor to copy, distribute or modify the Program subject to these terms and conditions. You may not impose any further restrictions on the recipients' exercise of the rights granted herein. You are not responsible for enforcing compliance by third parties to this License. 7. If, as a consequence of a court judgment or allegation of patent infringement or for any other reason (not limited to patent issues), conditions are imposed on you (whether by court order, agreement or otherwise) that contradict the conditions of this License, they do not excuse you from the conditions of this License. 
If you cannot distribute so as to satisfy simultaneously your obligations under this License and any other pertinent obligations, then as a consequence you may not distribute the Program at all. For example, if a patent license would not permit royalty-free redistribution of the Program by all those who receive copies directly or indirectly through you, then the only way you could satisfy both it and this License would be to refrain entirely from distribution of the Program. If any portion of this section is held invalid or unenforceable under any particular circumstance, the balance of the section is intended to apply and the section as a whole is intended to apply in other circumstances. It is not the purpose of this section to induce you to infringe any patents or other property right claims or to contest validity of any such claims; this section has the sole purpose of protecting the integrity of the free software distribution system, which is implemented by public license practices. Many people have made generous contributions to the wide range of software distributed through that system in reliance on consistent application of that system; it is up to the author/donor to decide if he or she is willing to distribute software through any other system and a licensee cannot impose that choice. This section is intended to make thoroughly clear what is believed to be a consequence of the rest of this License. 8. If the distribution and/or use of the Program is restricted in certain countries either by patents or by copyrighted interfaces, the original copyright holder who places the Program under this License may add an explicit geographical distribution limitation excluding those countries, so that distribution is permitted only in or among countries not thus excluded. In such case, this License incorporates the limitation as if written in the body of this License. 9. 
The Free Software Foundation may publish revised and/or new versions of the General Public License from time to time. Such new versions will be similar in spirit to the present version, but may differ in detail to address new problems or concerns. Each version is given a distinguishing version number. If the Program specifies a version number of this License which applies to it and "any later version", you have the option of following the terms and conditions either of that version or of any later version published by the Free Software Foundation. If the Program does not specify a version number of this License, you may choose any version ever published by the Free Software Foundation. 10. If you wish to incorporate parts of the Program into other free programs whose distribution conditions are different, write to the author to ask for permission. For software which is copyrighted by the Free Software Foundation, write to the Free Software Foundation; we sometimes make exceptions for this. Our decision will be guided by the two goals of preserving the free status of all derivatives of our free software and of promoting the sharing and reuse of software generally. NO WARRANTY 11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 12. 
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. END OF TERMS AND CONDITIONS How to Apply These Terms to Your New Programs If you develop a new program, and you want it to be of the greatest possible use to the public, the best way to achieve this is to make it free software which everyone can redistribute and change under these terms. To do so, attach the following notices to the program. It is safest to attach them to the start of each source file to most effectively convey the exclusion of warranty; and each file should have at least the "copyright" line and a pointer to where the full notice is found. <one line to give the program's name and a brief idea of what it does.> Copyright (C) <year> <name of author> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. <signature of Ty Coon>, 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Lesser General Public License instead of this License. vmhgfs-only/request.h 0000444 0000000 0000000 00000011446 13432725306 013677 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * request.h -- * * Functions dealing with the creation, deletion, and sending of HGFS * requests are defined here. */ #ifndef _HGFS_DRIVER_REQUEST_H_ #define _HGFS_DRIVER_REQUEST_H_ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/kref.h> #include <linux/list.h> #include <linux/wait.h> #include "compat_sched.h" #include "compat_spinlock.h" #include "hgfs.h" /* For common HGFS definitions. */ #include "vm_basic_types.h" #include "vm_basic_defs.h" /* Macros for accessing the payload portion of the HGFS request packet. */ #define HGFS_REQ_PAYLOAD(hgfsReq) ((hgfsReq)->payload) /* XXX: Needs change when VMCI is supported. */ #define HGFS_REQ_PAYLOAD_V3(hgfsReq) (HGFS_REQ_PAYLOAD(hgfsReq) + sizeof(HgfsRequest)) #define HGFS_REP_PAYLOAD_V3(hgfsRep) (HGFS_REQ_PAYLOAD(hgfsRep) + sizeof(HgfsReply)) /* * HGFS_REQ_STATE_ALLOCATED: * The filesystem half has allocated the request from the slab * allocator. The request is not on any list. * * HGFS_REQ_STATE_UNSENT: * The filesystem half of the driver has filled in the request fields * and placed the request in the global unsent list. It is now the * request handler's responsibility to submit this request to * the channel. Requests in this state are on the global unsent list. * * HGFS_REQ_STATE_SUBMITTED: * The packet has been sent, but the reply will arrive asynchronously. 
* The request will be on the hgfsRepPending list, and whenever * the reply arrives, the reply handler will remove the request from * the hgfsRepPending list and stuff the reply into the request's * packet buffer. * * This is only for asynchronous channel communication. * * HGFS_REQ_STATE_COMPLETED: * The request handler sent the request and received a reply. The reply * is stuffed in the request's packet buffer. Requests in this state * are not on any list. */ typedef enum { HGFS_REQ_STATE_ALLOCATED, HGFS_REQ_STATE_UNSENT, HGFS_REQ_STATE_SUBMITTED, HGFS_REQ_STATE_COMPLETED, /* Both header and payload were received. */ } HgfsState; /* * Each page that is sent from guest to host is described in the following * format. */ typedef struct HgfsDataPacket { struct page *page; uint32 offset; uint32 len; } HgfsDataPacket; /* * A request to be sent to the user process. */ typedef struct HgfsReq { /* Reference count */ struct kref kref; /* Links to place the object on various lists. */ struct list_head list; /* ID of the transport (its address) */ void *transportId; /* * When clients wait for the reply to a request, they'll wait on this * wait queue. */ wait_queue_head_t queue; /* Current state of the request. */ HgfsState state; /* ID of this request */ uint32 id; /* Pointer to payload in the buffer */ void *payload; /* Total size of the payload.*/ size_t payloadSize; /* * Size of the data buffer (below), not including size of chunk * used by transport. Must be enough to hold both request and * reply (but not at the same time). Initialized in channels. */ size_t bufferSize; /* * Used by read and write calls. Hgfs client passes in * pages to the vmci channel using datapackets and vmci channel * uses it to pass PA's to the host. */ HgfsDataPacket *dataPacket; /* Number of entries in data packet */ uint32 numEntries; /* * Packet of data, for both incoming and outgoing messages. * Include room for the command. 
*/ unsigned char buffer[]; } HgfsReq; /* Public functions (with respect to the entire module). */ HgfsReq *HgfsGetNewRequest(void); HgfsReq *HgfsCopyRequest(HgfsReq *req); int HgfsSendRequest(HgfsReq *req); HgfsReq *HgfsRequestGetRef(HgfsReq *req); void HgfsRequestPutRef(HgfsReq *req); #define HgfsFreeRequest(req) HgfsRequestPutRef(req) HgfsStatus HgfsReplyStatus(HgfsReq *req); void HgfsCompleteReq(HgfsReq *req); void HgfsFailReq(HgfsReq *req, int error); #endif // _HGFS_DRIVER_REQUEST_H_ vmhgfs-only/hgfsBd.c 0000444 0000000 0000000 00000023466 13432725330 013401 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsBd.c -- * * Backdoor calls used by hgfs pserver. [bac] */ #if defined(__KERNEL__) || defined(_KERNEL) || defined(KERNEL) # include "kernelStubs.h" #else # include <stdio.h> # include <stdlib.h> # include <string.h> # include <errno.h> # include "str.h" // for Str_Strcpy # include "debug.h" #endif #include "vm_assert.h" #include "rpcout.h" #include "hgfs.h" // for common HGFS definitions #include "hgfsBd.h" /* *----------------------------------------------------------------------------- * * HgfsBdGetBufInt -- * * Allocates a buffer to send a hgfs request in. 
This can be either a * HGFS_PACKET_MAX or HGFS_LARGE_PACKET_MAX size buffer depending on the * external funciton called. * * Results: * Pointer to a buffer that has the correct backdoor command prefix for * sending hgfs requests over the backdoor. * NULL on failure (not enough memory). * * Side effects: * None. * *----------------------------------------------------------------------------- */ static char * HgfsBdGetBufInt(size_t bufSize) { /* * Allocate a buffer that is large enough for an HGFS packet and the * synchronous HGFS command, write the command, and return a pointer that * points into the buffer, after the command. */ size_t len = bufSize + HGFS_SYNC_REQREP_CLIENT_CMD_LEN; char *buf = (char*) calloc(sizeof(char), len); if (!buf) { Debug("HgfsBd_GetBuf: Failed to allocate a bd buffer\n"); return NULL; } Str_Strcpy(buf, HGFS_SYNC_REQREP_CLIENT_CMD, len); return buf + HGFS_SYNC_REQREP_CLIENT_CMD_LEN; } /* *----------------------------------------------------------------------------- * * HgfsBd_GetBuf -- * * Get a buffer of size HGFS_PACKET_MAX to send hgfs requests in. * * Results: * See HgfsBdGetBufInt. * * Side effects: * Allocates memory that must be freed with a call to HgfsBd_PutBuf. * *----------------------------------------------------------------------------- */ char * HgfsBd_GetBuf(void) { return HgfsBdGetBufInt(HGFS_PACKET_MAX); } /* *----------------------------------------------------------------------------- * * HgfsBd_GetLargeBuf -- * * Get a buffer of size HGFS_LARGE_PACKET_MAX to send hgfs requests in. * * Results: * See HgfsBdGetBufInt. * * Side effects: * Allocates memory that must be freed with a call to HgfsBd_PutBuf. * *----------------------------------------------------------------------------- */ char * HgfsBd_GetLargeBuf(void) { return HgfsBdGetBufInt(HGFS_LARGE_PACKET_MAX); } /* *----------------------------------------------------------------------------- * * HgfsBd_PutBuf -- * * Release a buffer obtained with HgfsBd_GetBuf. 
* * Results: * None * * Side effects: * None * *----------------------------------------------------------------------------- */ void HgfsBd_PutBuf(char *buf) // IN { ASSERT(buf); free(buf - HGFS_SYNC_REQREP_CLIENT_CMD_LEN); } /* *----------------------------------------------------------------------------- * * HgfsBd_GetChannel -- * * Allocate a new RpcOut channel, and try to open the connection. * * Results: * Pointer to the allocated, opened channel on success. * NULL on failure (not enough memory, or failed to open the connection). * * Side effects: * None * *----------------------------------------------------------------------------- */ RpcOut * HgfsBd_GetChannel(void) { RpcOut *out = RpcOut_Construct(); Bool status; if (!out) { Debug("HgfsBd_GetChannel: Failed to allocate an RpcOut\n"); return NULL; } status = RpcOut_start(out); if (status == FALSE) { RpcOut_Destruct(out); return NULL; } return out; } /* *----------------------------------------------------------------------------- * * HgfsBd_CloseChannel -- * * Close the channel and free the RpcOut object. * * Results: * TRUE if closing the channel succeeded, FALSE if it failed. * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool HgfsBd_CloseChannel(RpcOut *out) // IN: Channel to close and free { Bool success; ASSERT(out); success = RpcOut_stop(out); if (success == TRUE) { RpcOut_Destruct(out); } return success; } /* *----------------------------------------------------------------------------- * * HgfsBd_Dispatch -- * * Get a reply to an hgfs request. We call RpcOut_Sent, which * returns a buffer with the reply in it, and we pass this back to * the caller. * * Results: * On success, returns zero. On failure, returns a negative error. 
* * Side effects: * None * *----------------------------------------------------------------------------- */ int HgfsBd_Dispatch(RpcOut *out, // IN: Channel to send on char *packetIn, // IN: Buf containing request packet size_t *packetSize, // IN/OUT: Size of packet in/out char const **packetOut) // OUT: Buf containing reply packet { Bool success; Bool rpcStatus; char const *reply; size_t replyLen; char *bdPacket = packetIn - HGFS_SYNC_REQREP_CLIENT_CMD_LEN; ASSERT(out); ASSERT(packetIn); ASSERT(packetSize); ASSERT(packetOut); memcpy(bdPacket, HGFS_SYNC_REQREP_CLIENT_CMD, HGFS_SYNC_REQREP_CLIENT_CMD_LEN); success = RpcOut_send(out, bdPacket, *packetSize + HGFS_CLIENT_CMD_LEN, &rpcStatus, &reply, &replyLen); if (!success || !rpcStatus) { Debug("HgfsBd_Dispatch: RpcOut_send returned failure\n"); return -1; } ASSERT(replyLen <= HGFS_LARGE_PACKET_MAX); *packetOut = reply; *packetSize = replyLen; return 0; } /* *----------------------------------------------------------------------------- * * HgfsBd_Enabled -- * * Test to see if hgfs is enabled on the host. * * Results: * TRUE if hgfs is enabled. * FALSE if hgfs is disabled. * * Side effects: * None * *----------------------------------------------------------------------------- */ Bool HgfsBd_Enabled(RpcOut *out, // IN: RPCI Channel char *requestPacket) // IN: Buffer (obtained from HgfsBd_GetBuf) { char const *replyPacket; // Buffer returned by HgfsBd_Dispatch size_t replyLen; Bool success; Bool rpcStatus; /* * Send a bogus (empty) request to the VMX. If hgfs is disabled on * the host side then the request will fail (because the RPCI call * itself will fail). If hgfs is enabled, we will get a packet back * (it will be an error packet because our request was malformed, * but we just discard it anyway). 
*/ success = RpcOut_send(out, requestPacket - HGFS_CLIENT_CMD_LEN, HGFS_CLIENT_CMD_LEN, &rpcStatus, &replyPacket, &replyLen); if (success && rpcStatus) { ASSERT(replyLen <= HGFS_LARGE_PACKET_MAX); } return success && rpcStatus; } /* *----------------------------------------------------------------------------- * * HgfsBd_OpenBackdoor -- * * Check if the HGFS channel is open, and, if not, open it. This is a * one-stop convenience wrapper around HgfsBd_Enabled, HgfsBd_GetBuf, and * HgfsBd_GetChannel. * * Results: * TRUE if the backdoor is now open, regardless of its previous state. * FALSE if the backdoor could not be opened. * * Side effects: * May open a channel to the host. * *----------------------------------------------------------------------------- */ Bool HgfsBd_OpenBackdoor(RpcOut **out) // IN/OUT: RPCI Channel { char *packetBuffer = NULL; Bool success = FALSE; ASSERT(out); /* Short-circuit: backdoor is already open. */ if (*out != NULL) { return TRUE; } /* Open the channel. */ *out = HgfsBd_GetChannel(); if (*out == NULL) { return FALSE; } /* Allocate a buffer for use in pinging the HGFS server. */ packetBuffer = HgfsBd_GetBuf(); if (packetBuffer == NULL) { goto out; } /* Ping the HGFS server. */ if (!HgfsBd_Enabled(*out, packetBuffer)) { goto out; } success = TRUE; out: if (packetBuffer != NULL) { HgfsBd_PutBuf(packetBuffer); } if (!success && *out != NULL) { HgfsBd_CloseChannel(*out); *out = NULL; } return success; } /* *----------------------------------------------------------------------------- * * HgfsBd_CloseBackdoor -- * * Closes the backdoor channel, if it's open. * * Results: * TRUE if the channel is now closed, regardless of its previous state. * FALSE if we could not close the channel. * * Side effects: * May close the channel to the host. 
* *----------------------------------------------------------------------------- */ Bool HgfsBd_CloseBackdoor(RpcOut **out) // IN/OUT: RPCI Channel { Bool success = TRUE; ASSERT(out); if (*out != NULL) { if (!HgfsBd_CloseChannel(*out)) { success = FALSE; } *out = NULL; } return success; } vmhgfs-only/cpNameLite.h 0000444 0000000 0000000 00000003257 13432725346 014235 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpLiteName.h -- * * Cross-platform "lite" name format used by hgfs. 
* */ #ifndef __CP_NAME_LITE_H__ #define __CP_NAME_LITE_H__ #if defined __KERNEL__ && defined __linux__ # include "driver-config.h" # include <linux/string.h> #elif defined _KERNEL && defined __FreeBSD__ # include <sys/libkern.h> # define strchr(s,c) index(s,c) #else # include <string.h> #endif #include "vm_basic_types.h" void CPNameLite_ConvertTo(char *bufIn, // IN/OUT: Input to convert size_t inSize, // IN: Size of input buffer char pathSep); // IN: Path separator void CPNameLite_ConvertFrom(char *bufIn, // IN/OUT: Input to convert size_t inSize, // IN: Size of input buffer char pathSep); // IN: Path separator #endif /* __CP_NAME_LITE_H__ */ vmhgfs-only/escBitvector.h 0000444 0000000 0000000 00000007367 13432725346 014656 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _ESC_BITVECTOR_H_ #define _ESC_BITVECTOR_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_DISTRIBUTE // XXX is this true? #include "includeCheck.h" #ifdef __KERNEL__ #include "driver-config.h" #include <linux/string.h> /* Don't include these if compiling for the Solaris or Apple kernels. 
*/ #elif !defined _KERNEL && !defined KERNEL #include <stdlib.h> #include <string.h> #endif #if defined _KERNEL && defined __FreeBSD__ # include <sys/libkern.h> #elif defined KERNEL && defined __APPLE__ # include <string.h> #endif #include "vm_assert.h" #define ESC_BITVECTOR_INDEX(_x) ((_x)>>5) #define ESC_BITVECTOR_MASK(_x) (1<<((_x)&31)) #define ESC_BITVECTOR_SIZE 256 // hardwired size of the bitvector /* *---------------------------------------------------------------------- * * EscBitVector -- * * Taken from bitvector.h, but hard wired for use with the Escape * routines, which always need a bitvector of 256 bits, are never * used in the monitor, and need to work in the linux kernel. [bac] * * *---------------------------------------------------------------------- */ typedef struct EscBitVector { uint32 vector[ESC_BITVECTOR_SIZE/32]; } EscBitVector; /* *---------------------------------------------------------------------- * * EscBitVector_Init -- * * Clear all the bits in this vector. 
* * Results: * All bits are cleared * *---------------------------------------------------------------------- */ static INLINE void EscBitVector_Init(EscBitVector *bv) { memset(bv, 0, sizeof(EscBitVector)); } /* *---------------------------------------------------------------------- * * EscBitVector_Set, EscBitVector_Clear, EscBitVector_Test -- * * basic operations * * Results: * insertion/deletion/presence to/from/in the set * * EscBitVector_Test returns non-zero if present, 0 otherwise * * *---------------------------------------------------------------------- */ static INLINE void EscBitVector_Set(EscBitVector *bv,int n) { ASSERT(n>=0 && n<ESC_BITVECTOR_SIZE); #ifdef __GNUC__ __asm__ __volatile ( "btsl %1,%0" : "=m" (bv->vector[0]) :"Ir" (n)); #else bv->vector[ESC_BITVECTOR_INDEX(n)] |= ESC_BITVECTOR_MASK(n); #endif } static INLINE void EscBitVector_Clear(EscBitVector *bv,int n) { ASSERT(n>=0 && n<ESC_BITVECTOR_SIZE); #ifdef __GNUC__ __asm__ __volatile ( "btrl %1,%0" : "=m" (bv->vector[0]) :"Ir" (n)); #else bv->vector[ESC_BITVECTOR_INDEX(n)] &= ~ESC_BITVECTOR_MASK(n); #endif } static INLINE int EscBitVector_Test(EscBitVector const *bv, int n) { ASSERT(n>=0 && n<ESC_BITVECTOR_SIZE); #ifdef __GNUC__ { uint32 tmp; __asm__ __volatile ( "btl %2,%1\n\tsbbl %0,%0" : "=r" (tmp) : "m" (bv->vector[0]),"Ir" (n)); return tmp; } #else return ((bv->vector[ESC_BITVECTOR_INDEX(n)] & ESC_BITVECTOR_MASK(n)) != 0); #endif } #endif /* _ESC_BITVECTOR_H_ */ vmhgfs-only/cpNameInt.h 0000444 0000000 0000000 00000004075 13432725346 014071 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpNameInt.h -- * * Cross-platform name format used by hgfs. * */ #ifndef __CP_NAME_INT_H__ #define __CP_NAME_INT_H__ #include "vm_basic_types.h" /* * Used by CPName_ConvertFrom */ int CPNameConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut, // IN/OUT: Output buffer char pathSep); // IN: Path separator character int CPNameEscapeAndConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut, // IN/OUT: Output buffer char pathSep); // IN: Path separator character /* * Common code for CPName_ConvertTo */ int CPNameConvertTo(char const *nameIn, // IN: Buf to convert size_t bufOutSize, // IN: Size of the output buffer char *bufOut, // OUT: Output buffer char pathSep); // IN: path separator to use #endif /* __CP_NAME_INT_H__ */ vmhgfs-only/backdoor.h 0000444 0000000 0000000 00000002635 13432725346 013777 0 ustar root root /********************************************************* * Copyright (C) 1999-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. 
* * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoor.h -- * * First layer of the internal communication channel between guest * applications and vmware */ #ifndef _BACKDOOR_H_ #define _BACKDOOR_H_ #include "vm_basic_types.h" #include "vm_assert.h" #include "backdoor_types.h" #if defined(__cplusplus) extern "C" { #endif void Backdoor(Backdoor_proto *bp); // IN/OUT void Backdoor_InOut(Backdoor_proto *bp); // IN/OUT void Backdoor_HbOut(Backdoor_proto_hb *bp); // IN/OUT void Backdoor_HbIn(Backdoor_proto_hb *bp); // IN/OUT #if defined(__cplusplus) } // extern "C" #endif #endif /* _BACKDOOR_H_ */ vmhgfs-only/transport.c 0000444 0000000 0000000 00000032223 13432725306 014232 0 ustar root root /********************************************************* * Copyright (C) 2009-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * transport.c -- * * This file handles the transport mechanisms available for HGFS. * This acts as a glue between the HGFS filesystem driver and the * actual transport channels (backdoor, tcp, vsock, ...). * * The sends happen in the process context, where as a kernel thread * handles the asynchronous replies. A queue of pending replies is * maintained and is protected by a spinlock. The channel opens and close * is protected by a mutex. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/errno.h> #include <linux/list.h> #include "compat_mutex.h" #include "compat_sched.h" #include "compat_spinlock.h" #include "compat_version.h" /* Must be included after semaphore.h. */ #include <linux/timer.h> /* Must be included after sched.h. */ #include <linux/interrupt.h> /* for spin_lock_bh */ #include "hgfsDevLinux.h" #include "hgfsProto.h" #include "module.h" #include "request.h" #include "transport.h" #include "vm_assert.h" static HgfsTransportChannel *hgfsChannel; /* Current active channel. */ static compat_mutex_t hgfsChannelLock; /* Lock to protect hgfsChannel. */ static struct list_head hgfsRepPending; /* Reply pending queue. */ static spinlock_t hgfsRepQueueLock; /* Reply pending queue lock. */ /* *---------------------------------------------------------------------- * * HgfsTransportOpenChannel -- * * Opens given communication channel with HGFS server. * * Results: * TRUE on success, FALSE on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsTransportOpenChannel(HgfsTransportChannel *channel) { Bool ret; switch (channel->status) { case HGFS_CHANNEL_UNINITIALIZED: case HGFS_CHANNEL_DEAD: ret = FALSE; break; case HGFS_CHANNEL_CONNECTED: ret = TRUE; break; case HGFS_CHANNEL_NOTCONNECTED: ret = channel->ops.open(channel); if (ret) { channel->status = HGFS_CHANNEL_CONNECTED; } break; default: ret = FALSE; ASSERT(0); /* Not reached. */ } return ret; } /* *---------------------------------------------------------------------- * * HgfsTransportCloseChannel -- * * Closes currently open communication channel. Has to be called * while holdingChannelLock. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsTransportCloseChannel(HgfsTransportChannel *channel) { if (channel->status == HGFS_CHANNEL_CONNECTED || channel->status == HGFS_CHANNEL_DEAD) { channel->ops.close(channel); channel->status = HGFS_CHANNEL_NOTCONNECTED; } } /* *---------------------------------------------------------------------- * * HgfsTransportSetupNewChannel -- * * Find a new workable channel. * * Results: * TRUE on success, otherwise FALSE. * * Side effects: * None * *---------------------------------------------------------------------- */ static Bool HgfsTransportSetupNewChannel(void) { HgfsTransportChannel *newChannel; newChannel = HgfsGetBdChannel(); LOG(10, (KERN_DEBUG LGPFX "%s CHANNEL: Bd channel\n", __func__)); ASSERT(newChannel); hgfsChannel = newChannel; return HgfsTransportOpenChannel(newChannel); } /* *---------------------------------------------------------------------- * * HgfsTransporAddPendingRequest -- * * Adds a request to the hgfsRepPending queue. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsTransportAddPendingRequest(HgfsReq *req) // IN: Request to add { ASSERT(req); spin_lock_bh(&hgfsRepQueueLock); list_add_tail(&req->list, &hgfsRepPending); spin_unlock_bh(&hgfsRepQueueLock); } /* *---------------------------------------------------------------------- * * HgfsTransportRemovePendingRequest -- * * Dequeues the request from the hgfsRepPending queue. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsTransportRemovePendingRequest(HgfsReq *req) // IN: Request to dequeue { ASSERT(req); spin_lock_bh(&hgfsRepQueueLock); list_del_init(&req->list); spin_unlock_bh(&hgfsRepQueueLock); } /* *---------------------------------------------------------------------- * * HgfsTransportFlushPendingRequests -- * * Complete all submitted requests with an error, called when * we are about to tear down communication channel. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsTransportFlushPendingRequests(void) { struct HgfsReq *req; spin_lock_bh(&hgfsRepQueueLock); list_for_each_entry(req, &hgfsRepPending, list) { if (req->state == HGFS_REQ_STATE_SUBMITTED) { LOG(6, (KERN_DEBUG LGPFX "%s: injecting error reply to req id: %d\n", __func__, req->id)); HgfsFailReq(req, -EIO); } } spin_unlock_bh(&hgfsRepQueueLock); } /* *---------------------------------------------------------------------- * * HgfsTransportGetPendingRequest -- * * Attempts to locate request with specified ID in the queue of * pending (waiting for server's reply) requests. * * Results: * NULL if request not found; otherwise address of the request * structure. * * Side effects: * Increments reference count of the request. 
* *---------------------------------------------------------------------- */ HgfsReq * HgfsTransportGetPendingRequest(HgfsHandle id) // IN: id of the request { HgfsReq *cur, *req = NULL; spin_lock_bh(&hgfsRepQueueLock); list_for_each_entry(cur, &hgfsRepPending, list) { if (cur->id == id) { ASSERT(cur->state == HGFS_REQ_STATE_SUBMITTED); req = HgfsRequestGetRef(cur); break; } } spin_unlock_bh(&hgfsRepQueueLock); return req; } /* *---------------------------------------------------------------------- * * HgfsTransportAllocateRequest -- * * Allocates HGFS request structre using channel-specific allocator. * * Results: * NULL on failure; otherwisepointer to newly allocated request. * * Side effects: * None * *---------------------------------------------------------------------- */ HgfsReq * HgfsTransportAllocateRequest(size_t bufferSize) // IN: size of the buffer { HgfsReq *req = NULL; /* * We use a temporary variable to make sure we stamp the request with * same channel as we used to make allocation since hgfsChannel can * be changed while we do allocation. */ HgfsTransportChannel *currentChannel = hgfsChannel; ASSERT(currentChannel); req = currentChannel->ops.allocate(bufferSize); if (req) { req->transportId = currentChannel; } return req; } /* *---------------------------------------------------------------------- * * HgfsTransportFreeRequest -- * * Free HGFS request structre using channel-specific free function. * * Results: * None. * * Side effects: * None. * *---------------------------------------------------------------------- */ void HgfsTransportFreeRequest(HgfsReq *req) // IN: size of the buffer { /* * We cannot use hgfsChannel structre because global channel could * changes in the meantime. We remember the channel when we do * allocation and call the same channel for de-allocation. Smart. 
*/ HgfsTransportChannel *channel = (HgfsTransportChannel *)req->transportId; channel->ops.free(req); return; } /* *---------------------------------------------------------------------- * * HgfsTransportSendRequest -- * * Sends the request via channel communication. * * Results: * Zero on success, non-zero error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsTransportSendRequest(HgfsReq *req) // IN: Request to send { HgfsReq *origReq = req; int ret = -EIO; ASSERT(req); ASSERT(req->state == HGFS_REQ_STATE_UNSENT); ASSERT(req->payloadSize <= req->bufferSize); compat_mutex_lock(&hgfsChannelLock); HgfsTransportAddPendingRequest(req); do { if (unlikely(hgfsChannel->status != HGFS_CHANNEL_CONNECTED)) { if (hgfsChannel->status == HGFS_CHANNEL_DEAD) { HgfsTransportCloseChannel(hgfsChannel); HgfsTransportFlushPendingRequests(); } if (!HgfsTransportSetupNewChannel()) { ret = -EIO; goto out; } } ASSERT(hgfsChannel->ops.send); /* If channel changed since we created request we need to adjust */ if (req->transportId != hgfsChannel) { HgfsTransportRemovePendingRequest(req); if (req != origReq) { HgfsRequestPutRef(req); } req = HgfsCopyRequest(origReq); if (req == NULL) { req = origReq; ret = -ENOMEM; goto out; } HgfsTransportAddPendingRequest(req); } ret = hgfsChannel->ops.send(hgfsChannel, req); if (likely(ret == 0)) break; LOG(4, (KERN_DEBUG LGPFX "%s: send failed with error %d\n", __func__, ret)); if (ret == -EINTR) { /* Don't retry when we are interrupted by some signal. */ goto out; } hgfsChannel->status = HGFS_CHANNEL_DEAD; } while (1); ASSERT(req->state == HGFS_REQ_STATE_COMPLETED || req->state == HGFS_REQ_STATE_SUBMITTED); out: compat_mutex_unlock(&hgfsChannelLock); if (likely(ret == 0)) { /* * Send succeeded, wait for the reply. * Right now, we cannot cancel request once they * are dispatched to the host. 
*/ wait_event(req->queue, req->state == HGFS_REQ_STATE_COMPLETED); } HgfsTransportRemovePendingRequest(req); /* * If we used a copy of request because we changed transport we * need to copy payload back into original request. */ if (req != origReq) { ASSERT(req->payloadSize <= origReq->bufferSize); origReq->payloadSize = req->payloadSize; memcpy(origReq->payload, req->payload, req->payloadSize); HgfsRequestPutRef(req); } return ret; } /* *---------------------------------------------------------------------- * * HgfsTransportInit -- * * Initialize the transport. * * Starts the reply thread, for handling incoming packets on the * connected socket. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsTransportInit(void) { INIT_LIST_HEAD(&hgfsRepPending); spin_lock_init(&hgfsRepQueueLock); compat_mutex_init(&hgfsChannelLock); compat_mutex_lock(&hgfsChannelLock); hgfsChannel = HgfsGetBdChannel(); ASSERT(hgfsChannel); compat_mutex_unlock(&hgfsChannelLock); } /* *---------------------------------------------------------------------- * * HgfsTransportMarkDead -- * * Marks current channel as dead so it can be cleaned up and * fails all submitted requests. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ void HgfsTransportMarkDead(void) { LOG(8, (KERN_DEBUG LGPFX "%s entered.\n", __func__)); compat_mutex_lock(&hgfsChannelLock); if (hgfsChannel) { hgfsChannel->status = HGFS_CHANNEL_DEAD; } HgfsTransportFlushPendingRequests(); compat_mutex_unlock(&hgfsChannelLock); } /* *---------------------------------------------------------------------- * * HgfsTransportExit -- * * Teardown the transport. * * Results: * None * * Side effects: * Cleans up everything, frees queues, closes channel. 
* *---------------------------------------------------------------------- */ void HgfsTransportExit(void) { LOG(8, (KERN_DEBUG LGPFX "%s entered.\n", __func__)); compat_mutex_lock(&hgfsChannelLock); ASSERT(hgfsChannel); HgfsTransportCloseChannel(hgfsChannel); hgfsChannel = NULL; compat_mutex_unlock(&hgfsChannelLock); ASSERT(list_empty(&hgfsRepPending)); LOG(8, (KERN_DEBUG LGPFX "%s exited.\n", __func__)); } vmhgfs-only/inode.h 0000444 0000000 0000000 00000002453 13432725306 013303 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * inode.h -- * * Inode operations for the filesystem portion of the vmhgfs driver. */ #ifndef _HGFS_DRIVER_INODE_H_ #define _HGFS_DRIVER_INODE_H_ /* Must come before any kernel header file. */ #include "driver-config.h" #include "compat_fs.h" /* Public functions (with respect to the entire module). */ int HgfsSetattr(struct dentry *dentry, struct iattr *iattr); int HgfsRevalidate(struct dentry *dentry); #endif // _HGFS_DRIVER_INODE_H_ vmhgfs-only/hgfsEscape.h 0000444 0000000 0000000 00000002513 13432725346 014256 0 ustar root root /********************************************************* * Copyright (C) 2008-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsEscape.h -- * * Escape and unescape filenames that are not legal on a particular * platform. * */ #ifndef __HGFS_ESCAPE_H__ #define __HGFS_ESCAPE_H__ int HgfsEscape_GetSize(char const *bufIn, uint32 sizeIn); int HgfsEscape_Do(char const *bufIn, uint32 sizeIn, uint32 sizeBufOut, char *bufOut); uint32 HgfsEscape_Undo(char *bufIn, uint32 sizeIn); #endif // __HGFS_ESCAPE_H__ vmhgfs-only/link.c 0000444 0000000 0000000 00000020106 13432725306 013130 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. 
* * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * link.c -- * * Symlink-specific inode operations for the filesystem portion of the * vmhgfs driver. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include "compat_fs.h" #include "compat_namei.h" #include "module.h" #include "hgfsProto.h" #include "fsutil.h" #include "vm_assert.h" /* HGFS symlink operations. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) static const char *HgfsFollowlink(struct dentry *dentry, void **cookie); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) static void *HgfsFollowlink(struct dentry *dentry, struct nameidata *nd); #else static int HgfsFollowlink(struct dentry *dentry, struct nameidata *nd); #endif static int HgfsReadlink(struct dentry *dentry, char __user *buffer, int buflen); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) static void HgfsPutlink(struct inode *unused, void *cookie); #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) static void HgfsPutlink(struct dentry *dentry, struct nameidata *nd, void *cookie); #else static void HgfsPutlink(struct dentry *dentry, struct nameidata *nd); #endif /* HGFS inode operations structure for symlinks. */ struct inode_operations HgfsLinkInodeOperations = { .follow_link = HgfsFollowlink, .readlink = HgfsReadlink, .put_link = HgfsPutlink, }; /* * HGFS symlink operations. */ /* *---------------------------------------------------------------------- * * HgfsFollowlink -- * * Modeled after nfs_follow_link from a 2.4 kernel so it'll work * across all kernel revisions we care about. * * Results: * Returns zero on success, negative error on failure. * * On new kernels: The error is returned as void *. * On older kernels: The error is returned as is. 
* * Side effects: * None * *---------------------------------------------------------------------- */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) static const char * HgfsFollowlink(struct dentry *dentry, void **cookie) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) static void * HgfsFollowlink(struct dentry *dentry, // IN: Dentry containing link struct nameidata *nd) // OUT: Contains target dentry #else static int HgfsFollowlink(struct dentry *dentry, // IN: Dentry containing link struct nameidata *nd) // OUT: Contains target dentry #endif { HgfsAttrInfo attr; char *fileName = NULL; int error; ASSERT(dentry); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) ASSERT(nd); #endif if (!dentry) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: null input\n")); error = -EINVAL; goto out; } LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling HgfsPrivateGetattr %s\n", __func__, dentry->d_name.name)); error = HgfsPrivateGetattr(dentry, &attr, &fileName); LOG(6, (KERN_DEBUG "VMware hgfs: %s: HgfsPrivateGetattr %s ret %d\n", __func__, dentry->d_name.name, error)); if (!error) { /* Let's make sure we got called on a symlink. 
*/ if (attr.type != HGFS_FILE_TYPE_SYMLINK || fileName == NULL) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsFollowlink: got called " "on something that wasn't a symlink\n")); error = -EINVAL; kfree(fileName); } else { LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling nd_set_link %s\n", __func__, fileName)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) *cookie = fileName; #else nd_set_link(nd, fileName); #endif } } out: #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) if (!error) { return *cookie; } else { return ERR_PTR(error); } #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) return ERR_PTR(error); #else return error; #endif } #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) #define HGFS_DO_READLINK(dentry,buffer,buflen,fileName) \ readlink_copy(buffer, buflen, fileName) #else #define HGFS_DO_READLINK(dentry,buffer,buflen,fileName) \ vfs_readlink(dentry, buffer, buflen, fileName) #endif /* *---------------------------------------------------------------------- * * HgfsReadlink -- * * Modeled after nfs_read_link from a 2.4 kernel so it'll work * across all kernel revisions we care about. * * Results: * Returns zero on success, negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsReadlink(struct dentry *dentry, // IN: Dentry containing link char __user *buffer, // OUT: User buffer to copy link into int buflen) // IN: Length of user buffer { HgfsAttrInfo attr; char *fileName = NULL; int error; ASSERT(dentry); ASSERT(buffer); if (!dentry) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsReadlink: null input\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling HgfsPrivateGetattr %s\n", __func__, dentry->d_name.name)); error = HgfsPrivateGetattr(dentry, &attr, &fileName); if (!error) { /* Let's make sure we got called on a symlink. 
*/ if (attr.type != HGFS_FILE_TYPE_SYMLINK || fileName == NULL) { LOG(6, (KERN_DEBUG "VMware hgfs: HgfsReadlink: got called " "on something that wasn't a symlink\n")); error = -EINVAL; } else { LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling vfs_readlink %s\n", __func__, fileName)); error = HGFS_DO_READLINK(dentry, buffer, buflen, fileName); LOG(6, (KERN_DEBUG "VMware hgfs: %s: vfs_readlink %s ret %dn", __func__, fileName, error)); } kfree(fileName); } return error; } /* *---------------------------------------------------------------------- * * HgfsPutlink -- * * Modeled after page_put_link from a 2.6.9 kernel so it'll work * across all kernel revisions we care about. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) static void HgfsPutlink(struct inode *unused, void *cookie) #elif LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 13) static void HgfsPutlink(struct dentry *dentry, // dentry struct nameidata *nd, // lookup name information void *cookie) // cookie #else static void HgfsPutlink(struct dentry *dentry, // dentry struct nameidata *nd) // lookup name information #endif { char *fileName = NULL; #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 2, 0) fileName = cookie; LOG(6, (KERN_DEBUG "VMware hgfs: %s: put for %s\n", __func__, fileName)); #else LOG(6, (KERN_DEBUG "VMware hgfs: %s: put for %s\n", __func__, dentry->d_name.name)); fileName = nd_get_link(nd); #endif if (!IS_ERR(fileName)) { LOG(6, (KERN_DEBUG "VMware hgfs: %s: putting %s\n", __func__, fileName)); kfree(fileName); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 2, 0) nd_set_link(nd, NULL); #endif } } vmhgfs-only/file.c 0000444 0000000 0000000 00000134047 13432725306 013124 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * file.c -- * * File operations for the filesystem portion of the vmhgfs driver. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/errno.h> #include <linux/module.h> #include <linux/signal.h> #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 15, 0) #include <linux/uio.h> /* iov_iter_count */ #endif #include "compat_cred.h" #include "compat_fs.h" #include "compat_kernel.h" #include "compat_slab.h" /* Must be after compat_fs.h */ #if defined VMW_USE_AIO #include <linux/aio.h> #endif #include "cpName.h" #include "hgfsProto.h" #include "module.h" #include "request.h" #include "hgfsUtil.h" #include "fsutil.h" #include "vm_assert.h" #include "vm_basic_types.h" /* * Before Linux 2.6.33 only O_DSYNC semantics were implemented, but using * the O_SYNC flag. We continue to use the existing numerical value * for O_DSYNC semantics now, but using the correct symbolic name for it. * This new value is used to request true Posix O_SYNC semantics. It is * defined in this strange way to make sure applications compiled against * new headers get at least O_DSYNC semantics on older kernels. 
*/ #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33) #define HGFS_FILECTL_SYNC(flags) ((flags) & O_DSYNC) #else #define HGFS_FILECTL_SYNC(flags) ((flags) & O_SYNC) #endif #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) typedef struct iov_iter *hgfs_iov; #define HGFS_IOV_TO_COUNT(iov, nr_segs) (iov_iter_count(iov)) #define HGFS_IOV_TO_SEGS(iov, nr_segs) (0) #define HGFS_IOCB_TO_POS(iocb, pos) (iocb->ki_pos) #else typedef const struct iovec *hgfs_iov; #define HGFS_IOV_TO_COUNT(iov, nr_segs) (iov_length(iov, nr_segs)) #define HGFS_IOV_TO_SEGS(iov, nr_segs) (nr_segs) #define HGFS_IOCB_TO_POS(iocb, pos) (pos) #endif /* Private functions. */ static int HgfsPackOpenRequest(struct inode *inode, struct file *file, HgfsOp opUsed, HgfsReq *req); static int HgfsUnpackOpenReply(HgfsReq *req, HgfsOp opUsed, HgfsHandle *file, HgfsLockType *lock); /* HGFS file operations for files. */ static int HgfsOpen(struct inode *inode, struct file *file); #if defined VMW_USE_AIO #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) static ssize_t HgfsFileRead(struct kiocb *iocb, struct iov_iter *to); static ssize_t HgfsFileWrite(struct kiocb *iocb, struct iov_iter *from); #else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) static ssize_t HgfsFileRead(struct kiocb *iocb, const struct iovec *iov, unsigned long numSegs, loff_t offset); static ssize_t HgfsFileWrite(struct kiocb *iocb, const struct iovec *iov, unsigned long numSegs, loff_t offset); #endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) #else static ssize_t HgfsRead(struct file *file, char __user *buf, size_t count, loff_t *offset); static ssize_t HgfsWrite(struct file *file, const char __user *buf, size_t count, loff_t *offset); #endif static loff_t HgfsSeek(struct file *file, loff_t offset, int origin); static int HgfsFlush(struct file *file #if !defined VMW_FLUSH_HAS_1_ARG ,fl_owner_t id #endif ); #if !defined VMW_FSYNC_31 static int HgfsDoFsync(struct inode *inode); #endif static int HgfsFsync(struct file *file, #if 
defined VMW_FSYNC_OLD struct dentry *dentry, #elif defined VMW_FSYNC_31 loff_t start, loff_t end, #endif int datasync); static int HgfsMmap(struct file *file, struct vm_area_struct *vma); static int HgfsRelease(struct inode *inode, struct file *file); #ifndef VMW_SENDFILE_NONE #if defined VMW_SENDFILE_OLD static ssize_t HgfsSendfile(struct file *file, loff_t *offset, size_t count, read_actor_t actor, void __user *target); #else /* defined VMW_SENDFILE_NEW */ static ssize_t HgfsSendfile(struct file *file, loff_t *offset, size_t count, read_actor_t actor, void *target); #endif #endif #ifdef VMW_SPLICE_READ static ssize_t HgfsSpliceRead(struct file *file, loff_t *offset, struct pipe_inode_info *pipe, size_t len, unsigned int flags); #endif /* HGFS file operations structure for files. */ struct file_operations HgfsFileFileOperations = { .owner = THIS_MODULE, .open = HgfsOpen, .llseek = HgfsSeek, .flush = HgfsFlush, #if defined VMW_USE_AIO #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 1, 0) /* Fallback to async counterpart, check kernel source read_write.c */ .read = NULL, .write = NULL, .read_iter = HgfsFileRead, .write_iter = HgfsFileWrite, #elif LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) .read = new_sync_read, .write = new_sync_write, .read_iter = HgfsFileRead, .write_iter = HgfsFileWrite, #else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) .read = do_sync_read, .write = do_sync_write, .aio_read = HgfsFileRead, .aio_write = HgfsFileWrite, #endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) #else .read = HgfsRead, .write = HgfsWrite, #endif .fsync = HgfsFsync, .mmap = HgfsMmap, .release = HgfsRelease, #ifndef VMW_SENDFILE_NONE .sendfile = HgfsSendfile, #endif #ifdef VMW_SPLICE_READ .splice_read = HgfsSpliceRead, #endif }; /* File open mask. 
*/ #define HGFS_FILE_OPEN_MASK (HGFS_OPEN_VALID_MODE | \ HGFS_OPEN_VALID_FLAGS | \ HGFS_OPEN_VALID_SPECIAL_PERMS | \ HGFS_OPEN_VALID_OWNER_PERMS | \ HGFS_OPEN_VALID_GROUP_PERMS | \ HGFS_OPEN_VALID_OTHER_PERMS | \ HGFS_OPEN_VALID_FILE_NAME | \ HGFS_OPEN_VALID_SERVER_LOCK) /* * Private functions. */ /* *---------------------------------------------------------------------- * * HgfsPackOpenRequest -- * * Setup the Open request, depending on the op version. * * Results: * Returns zero on success, or negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPackOpenRequest(struct inode *inode, // IN: Inode of the file to open struct file *file, // IN: File pointer for this open HgfsOp opUsed, // IN: Op to use HgfsReq *req) // IN/OUT: Packet to write into { char *name; uint32 *nameLength; size_t requestSize; int result; ASSERT(inode); ASSERT(file); ASSERT(req); switch (opUsed) { case HGFS_OP_OPEN_V3: { HgfsRequest *requestHeader; HgfsRequestOpenV3 *requestV3; requestHeader = (HgfsRequest *)HGFS_REQ_PAYLOAD(req); requestHeader->op = opUsed; requestHeader->id = req->id; requestV3 = (HgfsRequestOpenV3 *)HGFS_REQ_PAYLOAD_V3(req); requestSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3); /* We'll use these later. */ name = requestV3->fileName.name; nameLength = &requestV3->fileName.length; requestV3->mask = HGFS_FILE_OPEN_MASK; /* Linux clients need case-sensitive lookups. */ requestV3->fileName.flags = 0; requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE; requestV3->fileName.fid = HGFS_INVALID_HANDLE; /* Set mode. */ result = HgfsGetOpenMode(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open mode\n")); return -EINVAL; } requestV3->mode = result; /* Set flags. 
*/ result = HgfsGetOpenFlags(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open flags\n")); return -EINVAL; } requestV3->flags = result; LOG(4, (KERN_DEBUG "VMware hgfs: %s: mode file %o inode %o -> user %o\n", __func__, file->f_mode, inode->i_mode, (inode->i_mode & S_IRWXU) >> 6)); /* Set permissions. */ requestV3->specialPerms = (inode->i_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9; requestV3->ownerPerms = (inode->i_mode & S_IRWXU) >> 6; requestV3->groupPerms = (inode->i_mode & S_IRWXG) >> 3; requestV3->otherPerms = (inode->i_mode & S_IRWXO); /* XXX: Request no lock for now. */ requestV3->desiredLock = HGFS_LOCK_NONE; requestV3->reserved1 = 0; requestV3->reserved2 = 0; break; } case HGFS_OP_OPEN_V2: { HgfsRequestOpenV2 *requestV2; requestV2 = (HgfsRequestOpenV2 *)(HGFS_REQ_PAYLOAD(req)); requestV2->header.op = opUsed; requestV2->header.id = req->id; /* We'll use these later. */ name = requestV2->fileName.name; nameLength = &requestV2->fileName.length; requestSize = sizeof *requestV2; requestV2->mask = HGFS_FILE_OPEN_MASK; /* Set mode. */ result = HgfsGetOpenMode(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open mode\n")); return -EINVAL; } requestV2->mode = result; /* Set flags. */ result = HgfsGetOpenFlags(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open flags\n")); return -EINVAL; } requestV2->flags = result; /* Set permissions. */ requestV2->specialPerms = (inode->i_mode & (S_ISUID | S_ISGID | S_ISVTX)) >> 9; requestV2->ownerPerms = (inode->i_mode & S_IRWXU) >> 6; requestV2->groupPerms = (inode->i_mode & S_IRWXG) >> 3; requestV2->otherPerms = (inode->i_mode & S_IRWXO); /* XXX: Request no lock for now. 
*/ requestV2->desiredLock = HGFS_LOCK_NONE; break; } case HGFS_OP_OPEN: { HgfsRequestOpen *request; request = (HgfsRequestOpen *)(HGFS_REQ_PAYLOAD(req)); request->header.op = opUsed; request->header.id = req->id; /* We'll use these later. */ name = request->fileName.name; nameLength = &request->fileName.length; requestSize = sizeof *request; /* Set mode. */ result = HgfsGetOpenMode(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open mode\n")); return -EINVAL; } request->mode = result; /* Set flags. */ result = HgfsGetOpenFlags(file->f_flags); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: failed to get " "open flags\n")); return -EINVAL; } request->flags = result; /* Set permissions. */ request->permissions = (inode->i_mode & S_IRWXU) >> 6; break; } default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: unexpected " "OP type encountered\n")); return -EPROTO; } /* Build full name to send to server. */ if (HgfsBuildPath(name, req->bufferSize - (requestSize - 1), file->f_dentry) < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: build path " "failed\n")); return -EINVAL; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: opening \"%s\", " "flags %o, create perms %o\n", name, file->f_flags, file->f_mode)); /* Convert to CP name. */ result = CPName_ConvertTo(name, req->bufferSize - (requestSize - 1), name); if (result < 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsPackOpenRequest: CP conversion " "failed\n")); return -EINVAL; } *nameLength = (uint32) result; req->payloadSize = requestSize + result; return 0; } /* *---------------------------------------------------------------------- * * HgfsUnpackOpenReply -- * * Get interesting fields out of the Open reply, depending on the op * version. * * Results: * Returns zero on success, or negative error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsUnpackOpenReply(HgfsReq *req, // IN: Packet with reply inside HgfsOp opUsed, // IN: What request op did we send HgfsHandle *file, // OUT: Handle in reply packet HgfsLockType *lock) // OUT: The server lock we got { HgfsReplyOpenV3 *replyV3; HgfsReplyOpenV2 *replyV2; HgfsReplyOpen *replyV1; size_t replySize; ASSERT(req); ASSERT(file); ASSERT(lock); switch (opUsed) { case HGFS_OP_OPEN_V3: replyV3 = (HgfsReplyOpenV3 *)HGFS_REP_PAYLOAD_V3(req); replySize = HGFS_REP_PAYLOAD_SIZE_V3(replyV3); *file = replyV3->file; *lock = replyV3->acquiredLock; break; case HGFS_OP_OPEN_V2: replyV2 = (HgfsReplyOpenV2 *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV2; *file = replyV2->file; *lock = replyV2->acquiredLock; break; case HGFS_OP_OPEN: replyV1 = (HgfsReplyOpen *)(HGFS_REQ_PAYLOAD(req)); replySize = sizeof *replyV1; *file = replyV1->file; *lock = HGFS_LOCK_NONE; break; default: /* This really shouldn't happen since we set opUsed ourselves. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: unexpected " "OP type encountered\n")); ASSERT(FALSE); return -EPROTO; } if (req->payloadSize != replySize) { /* * The reply to Open is a fixed size. So the size of the payload * really ought to match the expected size of an HgfsReplyOpen[V2]. */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackOpenReply: wrong packet " "size\n")); return -EPROTO; } return 0; } /* * HGFS file operations for files. */ /* *---------------------------------------------------------------------- * * HgfsOpen -- * * Called whenever a process opens a file in our filesystem. * * We send an "Open" request to the server with the name stored in * this file's inode. If the Open succeeds, we store the filehandle * sent by the server in the file struct so it can be accessed by * read/write/close. * * Results: * Returns zero if on success, error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsOpen(struct inode *inode, // IN: Inode of the file to open struct file *file) // IN: File pointer for this open { HgfsReq *req; HgfsOp opUsed; HgfsStatus replyStatus; HgfsHandle replyFile; HgfsLockType replyLock; HgfsInodeInfo *iinfo; int result = 0; ASSERT(inode); ASSERT(inode->i_sb); ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_inode); iinfo = INODE_GET_II_P(inode); LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name)); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: /* * Set up pointers using the proper struct This lets us check the * version exactly once and use the pointers later. */ opUsed = hgfsVersionOpen; result = HgfsPackOpenRequest(inode, file, opUsed, req); if (result != 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: error packing request\n")); goto out; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply and check return status. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: iinfo->createdAndUnopened = FALSE; LOG(10, (KERN_DEBUG "VMware hgfs: HgfsOpen: old hostFileId = " "%"FMT64"u\n", iinfo->hostFileId)); /* * Invalidate the hostFileId as we need to retrieve it from * the server. */ iinfo->hostFileId = 0; result = HgfsUnpackOpenReply(req, opUsed, &replyFile, &replyLock); if (result != 0) { break; } result = HgfsCreateFileInfo(file, replyFile); if (result != 0) { break; } LOG(6, (KERN_DEBUG "VMware hgfs: HgfsOpen: set handle to %u\n", replyFile)); /* * HgfsCreate faked all of the inode's attributes, so by the time * we're done in HgfsOpen, we need to make sure that the attributes * in the inode are real. 
The following is only necessary when * O_CREAT is set, otherwise we got here after HgfsLookup (which sent * a getattr to the server and got the real attributes). * * In particular, we'd like to at least try and set the inode's * uid/gid to match the caller's. We don't expect this to work, * because Windows servers will ignore it, and Linux servers running * as non-root won't be able to change it, but we're forward thinking * people. * * Either way, we force a revalidate following the setattr so that * we'll get the actual uid/gid from the server. */ if (file->f_flags & O_CREAT) { struct dentry *dparent; struct inode *iparent; /* * This is not the root of our file system so there should always * be a parent. */ ASSERT(file->f_dentry->d_parent); /* * Here we obtain a reference on the parent to make sure it doesn't * go away. This might not be necessary, since the existence of * a child (which we hold a reference to in this call) should * account for a reference in the parent, but it's safe to do so. * Overly cautious and safe is better than risky and broken. * * XXX Note that this and a handful of other hacks wouldn't be * necessary if we actually created the file in our create * implementation (where references and locks are properly held). * We could do this if we were willing to give up support for * O_EXCL on 2.4 kernels. */ dparent = dget(file->f_dentry->d_parent); iparent = dparent->d_inode; HgfsSetUidGid(iparent, file->f_dentry, current_fsuid(), current_fsgid()); dput(dparent); } break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_OPEN_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: Version 3 not " "supported. Falling back to version 2.\n")); hgfsVersionOpen = HGFS_OP_OPEN_V2; goto retry; } /* Retry with Version 1 of Open. Set globally. */ if (opUsed == HGFS_OP_OPEN_V2) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: Version 2 not " "supported. 
Falling back to version 1.\n")); hgfsVersionOpen = HGFS_OP_OPEN; goto retry; } /* Fallthrough. */ default: break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsOpen: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); /* * If the open failed (for any reason) and we tried to open a newly created * file, we must ensure that the next operation on this inode triggers a * revalidate to the server. This is because the file wasn't created on the * server, yet we currently believe that it was, because we created a fake * inode with a hashed dentry for it in HgfsCreate. We will continue to * believe this until the dentry's ttl expires, which will cause a * revalidate to the server that will reveal the truth. So in order to find * the truth as soon as possible, we'll reset the dentry's last revalidate * time now to force a revalidate the next time someone uses the dentry. * * We're using our own flag to track this case because using O_CREAT isn't * good enough: HgfsOpen will be called with O_CREAT even if the file exists * on the server, and if that's the case, there's no need to revalidate. * * XXX: Note that this will need to be reworked if/when we support hard * links, because multiple dentries will point to the same inode, and * forcing a revalidate on one will not force it on any others. */ if (result != 0 && iinfo->createdAndUnopened == TRUE) { HgfsDentryAgeForce(file->f_dentry); } return result; } #if defined VMW_USE_AIO /* *---------------------------------------------------------------------- * * HgfsGenericFileRead -- * * Called when the kernel initiates an asynchronous read from a file in * our filesystem. Our function is just a thin wrapper around * system generic read function. 
* * * Results: * Returns the number of bytes read on success, or an error on * failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static ssize_t HgfsGenericFileRead(struct kiocb *iocb, // IN: I/O control block hgfs_iov iov, // IN: Array of I/O vectors unsigned long iovSegs, // IN: Count of I/O vectors loff_t pos) // IN: Position at which to read { ssize_t result; LOG(8, (KERN_DEBUG "VMware hgfs: %s(%lu@%Ld)\n", __func__, (unsigned long)HGFS_IOV_TO_COUNT(iov, iovSegs), (long long) pos)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) result = generic_file_read_iter(iocb, iov); #else result = generic_file_aio_read(iocb, iov, iovSegs, pos); #endif LOG(8, (KERN_DEBUG "VMware hgfs: %s return %"FMTSZ"d\n", __func__, result)); return result; } /* *---------------------------------------------------------------------- * * HgfsFileRead -- * * Called when the kernel initiates an asynchronous read to a file in * our filesystem. Our function is just a thin wrapper around * generic_file_aio_read() that tries to validate the dentry first. * * Results: * Returns the number of bytes read on success, or an error on * failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) static ssize_t HgfsFileRead(struct kiocb *iocb, // IN: I/O control block struct iov_iter *iov) // OUT: Array of I/O buffers #else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) static ssize_t HgfsFileRead(struct kiocb *iocb, // IN: I/O control block const struct iovec *iov, // OUT: Array of I/O buffers unsigned long numSegs, // IN: Number of buffers loff_t offset) // IN: Offset at which to read #endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) { ssize_t result; struct dentry *readDentry; loff_t pos; unsigned long iovSegs; ASSERT(iocb); ASSERT(iocb->ki_filp); ASSERT(iocb->ki_filp->f_dentry); ASSERT(iov); pos = HGFS_IOCB_TO_POS(iocb, offset); iovSegs = HGFS_IOV_TO_SEGS(iov, numSegs); readDentry = iocb->ki_filp->f_dentry; LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n", __func__, readDentry->d_parent->d_name.name, readDentry->d_name.name)); result = HgfsRevalidate(readDentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__)); goto out; } result = HgfsGenericFileRead(iocb, iov, iovSegs, pos); out: return result; } /* *---------------------------------------------------------------------- * * HgfsGenericFileWrite -- * * Called when the kernel initiates an asynchronous write to a file in * our filesystem. Our function is just a thin wrapper around * system generic write function. * * * Results: * Returns the number of bytes written on success, or an error on * failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static ssize_t HgfsGenericFileWrite(struct kiocb *iocb, // IN: I/O control block hgfs_iov iov, // IN: Array of I/O vectors unsigned long iovSegs, // IN: Count of I/O vectors loff_t pos) // IN: Position at which to write { ssize_t result; LOG(8, (KERN_DEBUG "VMware hgfs: %s(%lu@%Ld)\n", __func__, (unsigned long)HGFS_IOV_TO_COUNT(iov, iovSegs), (long long) pos)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) result = generic_file_write_iter(iocb, iov); #else result = generic_file_aio_write(iocb, iov, iovSegs, pos); #endif LOG(8, (KERN_DEBUG "VMware hgfs: %s return %"FMTSZ"d\n", __func__, result)); return result; } /* *---------------------------------------------------------------------- * * HgfsFileWrite -- * * Called when the kernel initiates an asynchronous write to a file in * our filesystem. Our function is just a thin wrapper around * generic_file_aio_write() that tries to validate the dentry first. * * Note that files opened with O_SYNC (or superblocks mounted with * "sync") are synchronously written to by the VFS. * * Results: * Returns the number of bytes written on success, or an error on * failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) ssize_t HgfsFileWrite(struct kiocb *iocb, // IN: I/O control block struct iov_iter *iov) // IN: Array of I/O buffers #else // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) static ssize_t HgfsFileWrite(struct kiocb *iocb, // IN: I/O control block const struct iovec *iov, // IN: Array of I/O buffers unsigned long numSegs, // IN: Number of buffers loff_t offset) // IN: Offset at which to write #endif // LINUX_VERSION_CODE >= KERNEL_VERSION(3, 16, 0) { ssize_t result; struct dentry *writeDentry; HgfsInodeInfo *iinfo; loff_t pos; unsigned long iovSegs; ASSERT(iocb); ASSERT(iocb->ki_filp); ASSERT(iocb->ki_filp->f_dentry); ASSERT(iov); pos = HGFS_IOCB_TO_POS(iocb, offset); iovSegs = HGFS_IOV_TO_SEGS(iov, numSegs); writeDentry = iocb->ki_filp->f_dentry; iinfo = INODE_GET_II_P(writeDentry->d_inode); LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n", __func__, writeDentry->d_parent->d_name.name, writeDentry->d_name.name)); spin_lock(&writeDentry->d_inode->i_lock); /* * Guard against dentry revalidation invalidating the inode underneath us. * * Data is being written and may have valid data in a page in the cache. * This action prevents any invalidating of the inode when a flushing of * cache data occurs prior to syncing the file with the server's attributes. * The flushing of cache data would empty our in memory write pages list and * would cause the inode modified write time to be updated and so the inode * would also be invalidated. 
*/ iinfo->numWbPages++; spin_unlock(&writeDentry->d_inode->i_lock); result = HgfsRevalidate(writeDentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__)); goto out; } result = HgfsGenericFileWrite(iocb, iov, iovSegs, pos); if (result >= 0) { if (IS_SYNC(writeDentry->d_inode) || HGFS_FILECTL_SYNC(iocb->ki_filp->f_flags)) { int error; #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) error = vfs_fsync(iocb->ki_filp, 0); #else error = HgfsDoFsync(writeDentry->d_inode); #endif if (error < 0) { result = error; } } } out: spin_lock(&writeDentry->d_inode->i_lock); iinfo->numWbPages--; spin_unlock(&writeDentry->d_inode->i_lock); return result; } #else /* *---------------------------------------------------------------------- * * HgfsRead -- * * Called whenever a process reads from a file in our filesystem. Our * function is just a thin wrapper around generic_read_file() that * tries to validate the dentry first. * * Results: * Returns the number of bytes read on success, or an error on * failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static ssize_t HgfsRead(struct file *file, // IN: File to read from char __user *buf, // OUT: User buffer to copy data into size_t count, // IN: Number of bytes to read loff_t *offset) // IN: Offset at which to read { int result; ASSERT(file); ASSERT(file->f_dentry); ASSERT(buf); ASSERT(offset); LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s,%Zu@%lld)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, count, (long long) *offset)); result = HgfsRevalidate(file->f_dentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRead: invalid dentry\n")); goto out; } result = generic_file_read(file, buf, count, offset); out: return result; } /* *---------------------------------------------------------------------- * * HgfsWrite -- * * Called whenever a process writes to a file in our filesystem. 
Our * function is just a thin wrapper around generic_write_file() that * tries to validate the dentry first. * * Note that files opened with O_SYNC (or superblocks mounted with * "sync") are synchronously written to by the VFS. * * Results: * Returns the number of bytes written on success, or an error on * failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static ssize_t HgfsWrite(struct file *file, // IN: File to write to const char __user *buf, // IN: User buffer where the data is size_t count, // IN: Number of bytes to write loff_t *offset) // IN: Offset to begin writing at { int result; ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_inode); ASSERT(buf); ASSERT(offset); LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s,%Zu@%lld)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, count, (long long) *offset)); result = HgfsRevalidate(file->f_dentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsWrite: invalid dentry\n")); goto out; } result = generic_file_write(file, buf, count, offset); out: return result; } #endif /* *---------------------------------------------------------------------- * * HgfsSeek -- * * Called whenever a process moves the file pointer for a file in our * filesystem. Our function is just a thin wrapper around * generic_file_llseek() that tries to validate the dentry first. * * Results: * Returns the new position of the file pointer on success, * or a negative error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ static loff_t HgfsSeek(struct file *file, // IN: File to seek loff_t offset, // IN: Number of bytes to seek int origin) // IN: Position to seek from { loff_t result = -1; ASSERT(file); ASSERT(file->f_dentry); LOG(6, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %u, %lld, %d)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, FILE_GET_FI_P(file)->handle, offset, origin)); result = (loff_t) HgfsRevalidate(file->f_dentry); if (result) { LOG(6, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__)); goto out; } result = generic_file_llseek(file, offset, origin); out: return result; } #if !defined VMW_FSYNC_31 /* *---------------------------------------------------------------------- * * HgfsDoFsync -- * * Helper for HgfsFlush() and HgfsFsync(). * * The hgfs protocol doesn't support fsync explicityly yet. * So for now, we flush all the pages to presumably honor the * intent of an app calling fsync() which is to get the * data onto persistent storage. As things stand now we're at * the whim of the hgfs server code running on the host to fsync or * not if and when it pleases. * * * Results: * Returns zero on success. Otherwise an error. * * Side effects: * None. * *---------------------------------------------------------------------- */ static int HgfsDoFsync(struct inode *inode) // IN: File we operate on { int ret; LOG(4, (KERN_DEBUG "VMware hgfs: %s(%"FMT64"u)\n", __func__, INODE_GET_II_P(inode)->hostFileId)); ret = compat_filemap_write_and_wait(inode->i_mapping); LOG(4, (KERN_DEBUG "VMware hgfs: %s: returns %d\n", __func__, ret)); return ret; } #endif /* *---------------------------------------------------------------------- * * HgfsFlush -- * * Called when user process calls fflush() on an hgfs file. * Flush all dirty pages and check for write errors. * * * Results: * Returns zero on success. (Currently always succeeds). 
* * Side effects: * None. * *---------------------------------------------------------------------- */ static int HgfsFlush(struct file *file // IN: file to flush #if !defined VMW_FLUSH_HAS_1_ARG ,fl_owner_t id // IN: id not used #endif ) { int ret = 0; LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name)); if ((file->f_mode & FMODE_WRITE) == 0) { goto exit; } /* Flush writes to the server and return any errors */ LOG(6, (KERN_DEBUG "VMware hgfs: %s: calling vfs_sync ... \n", __func__)); #if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ret = vfs_fsync(file, 0); #else ret = HgfsDoFsync(file->f_dentry->d_inode); #endif exit: LOG(4, (KERN_DEBUG "VMware hgfs: %s: returns %d\n", __func__, ret)); return ret; } /* *---------------------------------------------------------------------- * * HgfsFsync -- * * Called when user process calls fsync() on hgfs file. * * The hgfs protocol doesn't support fsync explicitly yet, * so for now, we flush all the pages to presumably honor the * intent of an app calling fsync() which is to get the * data onto persistent storage, and as things stand now we're at * the whim of the hgfs server code running on the host to fsync or * not if and when it pleases. * * Results: * Returns zero on success. (Currently always succeeds). * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int HgfsFsync(struct file *file, // IN: File we operate on #if defined VMW_FSYNC_OLD struct dentry *dentry, // IN: Dentry for this file #elif defined VMW_FSYNC_31 loff_t start, // IN: start of range to sync loff_t end, // IN: end of range to sync #endif int datasync) // IN: fdatasync or fsync { int ret = 0; loff_t startRange; loff_t endRange; struct inode *inode; #if defined VMW_FSYNC_31 startRange = start; endRange = end; #else startRange = 0; endRange = MAX_INT64; #endif LOG(4, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lld, %lld, %d)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, startRange, endRange, datasync)); /* Flush writes to the server and return any errors */ inode = file->f_dentry->d_inode; #if defined VMW_FSYNC_31 ret = filemap_write_and_wait_range(inode->i_mapping, startRange, endRange); #else ret = HgfsDoFsync(inode); #endif LOG(4, (KERN_DEBUG "VMware hgfs: %s: written pages %lld, %lld returns %d)\n", __func__, startRange, endRange, ret)); return ret; } /* *---------------------------------------------------------------------- * * HgfsMmap -- * * Called when user process calls mmap() on hgfs file. This is a very * thin wrapper function- we simply attempt to revalidate the * dentry prior to calling generic_file_mmap(). * * Results: * Returns zero on success. * Returns negative error value on failure * * Side effects: * None. 
* *---------------------------------------------------------------------- */ static int HgfsMmap(struct file *file, // IN: File we operate on struct vm_area_struct *vma) // IN/OUT: VM area information { int result; ASSERT(file); ASSERT(vma); ASSERT(file->f_dentry); LOG(6, (KERN_DEBUG "VMware hgfs: %s(%s/%s)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name)); result = HgfsRevalidate(file->f_dentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__)); goto out; } result = generic_file_mmap(file, vma); out: return result; } /* *---------------------------------------------------------------------- * * HgfsRelease -- * * Called when the last user of a file closes it, i.e. when the * file's f_count becomes zero. * * Results: * Returns zero on success, or an error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsRelease(struct inode *inode, // IN: Inode that this file points to struct file *file) // IN: File that is getting released { HgfsReq *req; HgfsHandle handle; HgfsOp opUsed; HgfsStatus replyStatus; int result = 0; ASSERT(inode); ASSERT(file); ASSERT(file->f_dentry); ASSERT(file->f_dentry->d_sb); handle = FILE_GET_FI_P(file)->handle; LOG(6, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %u)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, handle)); /* * This may be our last open handle to an inode, so we should flush our * dirty pages before closing it. 
*/ compat_filemap_write_and_wait(inode->i_mapping); HgfsReleaseFileInfo(file); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: out of memory while " "getting new request\n")); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionClose; if (opUsed == HGFS_OP_CLOSE_V3) { HgfsRequest *header; HgfsRequestCloseV3 *request; header = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req)); header->id = req->id; header->op = opUsed; request = (HgfsRequestCloseV3 *)(HGFS_REQ_PAYLOAD_V3(req)); request->file = handle; request->reserved = 0; req->payloadSize = HGFS_REQ_PAYLOAD_SIZE_V3(request); } else { HgfsRequestClose *request; request = (HgfsRequestClose *)(HGFS_REQ_PAYLOAD(req)); request->header.id = req->id; request->header.op = opUsed; request->file = handle; req->payloadSize = sizeof *request; } /* Send the request and process the reply. */ result = HgfsSendRequest(req); if (result == 0) { /* Get the reply. */ replyStatus = HgfsReplyStatus(req); result = HgfsStatusConvertToLinux(replyStatus); switch (result) { case 0: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: released handle %u\n", handle)); break; case -EPROTO: /* Retry with older version(s). Set globally. */ if (opUsed == HGFS_OP_CLOSE_V3) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: Version 3 not " "supported. 
Falling back to version 1.\n")); hgfsVersionClose = HGFS_OP_CLOSE; goto retry; } break; default: LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: failed handle %u\n", handle)); break; } } else if (result == -EIO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: timed out\n")); } else if (result == -EPROTO) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: server " "returned error: %d\n", result)); } else { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsRelease: unknown error: " "%d\n", result)); } out: HgfsFreeRequest(req); return result; } #ifndef VMW_SENDFILE_NONE /* *----------------------------------------------------------------------------- * * HgfsSendfile -- * * sendfile() wrapper for HGFS. Note that this is for sending a file * from HGFS to another filesystem (or socket). To use HGFS as the * destination file in a call to sendfile(), we must implement sendpage() * as well. * * Like mmap(), we're just interested in validating the dentry and then * calling into generic_file_sendfile(). * * Results: * Returns number of bytes written on success, or an error on failure. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ #if defined VMW_SENDFILE_OLD static ssize_t HgfsSendfile(struct file *file, // IN: File to read from loff_t *offset, // IN/OUT: Where to start reading size_t count, // IN: How much to read read_actor_t actor, // IN: Routine to send a page of data void __user *target) // IN: Destination file/socket #elif defined VMW_SENDFILE_NEW static ssize_t HgfsSendfile(struct file *file, // IN: File to read from loff_t *offset, // IN/OUT: Where to start reading size_t count, // IN: How much to read read_actor_t actor, // IN: Routine to send a page of data void *target) // IN: Destination file/socket #endif { ssize_t result; ASSERT(file); ASSERT(file->f_dentry); ASSERT(target); ASSERT(offset); ASSERT(actor); LOG(6, (KERN_DEBUG "VMware hgfs: HgfsSendfile: was called\n")); result = HgfsRevalidate(file->f_dentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSendfile: invalid dentry\n")); goto out; } result = generic_file_sendfile (file, offset, count, actor, target); out: return result; } #endif #ifdef VMW_SPLICE_READ /* *----------------------------------------------------------------------------- * * HgfsSpliceRead -- * * splice_read() wrapper for HGFS. Note that this is for sending a file * from HGFS to another filesystem (or socket). To use HGFS as the * destination file in a call to splice, we must implement splice_write() * as well. * * Like mmap(), we're just interested in validating the dentry and then * calling into generic_file_splice_read(). * * Results: * Returns number of bytes written on success, or an error on failure. * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ static ssize_t HgfsSpliceRead(struct file *file, // IN: File to read from loff_t *offset, // IN/OUT: Where to start reading struct pipe_inode_info *pipe, // IN: Pipe where to write data size_t len, // IN: How much to read unsigned int flags) // IN: Various flags { ssize_t result; ASSERT(file); ASSERT(file->f_dentry); LOG(6, (KERN_DEBUG "VMware hgfs: %s(%s/%s, %lu@%Lu)\n", __func__, file->f_dentry->d_parent->d_name.name, file->f_dentry->d_name.name, (unsigned long) len, (unsigned long long) *offset)); result = HgfsRevalidate(file->f_dentry); if (result) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: invalid dentry\n", __func__)); goto out; } result = generic_file_splice_read(file, offset, pipe, len, flags); out: return result; } #endif vmhgfs-only/backdoorGcc64.c 0000444 0000000 0000000 00000015311 13432725346 014554 0 ustar root root /********************************************************* * Copyright (C) 2005-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * backdoorGcc64.c -- * * Implements the real work for guest-side backdoor for GCC, 64-bit * target (supports inline ASM, GAS syntax). 
The asm sections are marked * volatile since vmware can change the registers content without the * compiler knowing it. * * See backdoorGCC32.c (from which this code was mostly copied) for * details on why the ASM is written this way. Also note that it might be * possible to write the asm blocks using the symbolic operand specifiers * in such a way that the same asm would generate correct code for both * 32-bit and 64-bit targets, but I'm too lazy to figure it all out. * --rrdharan */ #ifdef __cplusplus extern "C" { #endif #include "backdoor.h" #include "backdoorInt.h" /* *---------------------------------------------------------------------------- * * Backdoor_InOut -- * * Send a low-bandwidth basic request (16 bytes) to vmware, and return its * reply (24 bytes). * * Results: * Host-side response returned in bp IN/OUT parameter. * * Side effects: * Pokes the backdoor. * *---------------------------------------------------------------------------- */ void Backdoor_InOut(Backdoor_proto *myBp) // IN/OUT { uint64 dummy; __asm__ __volatile__( #ifdef __APPLE__ /* * Save %rbx on the stack because the Mac OS GCC doesn't want us to * clobber it - it erroneously thinks %rbx is the PIC register. * (Radar bug 7304232) */ "pushq %%rbx" "\n\t" #endif "pushq %%rax" "\n\t" "movq 40(%%rax), %%rdi" "\n\t" "movq 32(%%rax), %%rsi" "\n\t" "movq 24(%%rax), %%rdx" "\n\t" "movq 16(%%rax), %%rcx" "\n\t" "movq 8(%%rax), %%rbx" "\n\t" "movq (%%rax), %%rax" "\n\t" "inl %%dx, %%eax" "\n\t" /* NB: There is no inq instruction */ "xchgq %%rax, (%%rsp)" "\n\t" "movq %%rdi, 40(%%rax)" "\n\t" "movq %%rsi, 32(%%rax)" "\n\t" "movq %%rdx, 24(%%rax)" "\n\t" "movq %%rcx, 16(%%rax)" "\n\t" "movq %%rbx, 8(%%rax)" "\n\t" "popq (%%rax)" "\n\t" #ifdef __APPLE__ "popq %%rbx" "\n\t" #endif : "=a" (dummy) : "0" (myBp) /* * vmware can modify the whole VM state without the compiler knowing * it. So far it does not modify EFLAGS. 
--hpreg */ : #ifndef __APPLE__ /* %rbx is unchanged at the end of the function on Mac OS. */ "rbx", #endif "rcx", "rdx", "rsi", "rdi", "memory" ); } /* *----------------------------------------------------------------------------- * * BackdoorHbIn -- * BackdoorHbOut -- * * Send a high-bandwidth basic request to vmware, and return its * reply. * * Results: * Host-side response returned in bp IN/OUT parameter. * * Side-effects: * Pokes the high-bandwidth backdoor port. * *----------------------------------------------------------------------------- */ void BackdoorHbIn(Backdoor_proto_hb *myBp) // IN/OUT { uint64 dummy; __asm__ __volatile__( "pushq %%rbp" "\n\t" #ifdef __APPLE__ /* * Save %rbx on the stack because the Mac OS GCC doesn't want us to * clobber it - it erroneously thinks %rbx is the PIC register. * (Radar bug 7304232) */ "pushq %%rbx" "\n\t" #endif "pushq %%rax" "\n\t" "movq 48(%%rax), %%rbp" "\n\t" "movq 40(%%rax), %%rdi" "\n\t" "movq 32(%%rax), %%rsi" "\n\t" "movq 24(%%rax), %%rdx" "\n\t" "movq 16(%%rax), %%rcx" "\n\t" "movq 8(%%rax), %%rbx" "\n\t" "movq (%%rax), %%rax" "\n\t" "cld" "\n\t" "rep; insb" "\n\t" "xchgq %%rax, (%%rsp)" "\n\t" "movq %%rbp, 48(%%rax)" "\n\t" "movq %%rdi, 40(%%rax)" "\n\t" "movq %%rsi, 32(%%rax)" "\n\t" "movq %%rdx, 24(%%rax)" "\n\t" "movq %%rcx, 16(%%rax)" "\n\t" "movq %%rbx, 8(%%rax)" "\n\t" "popq (%%rax)" "\n\t" #ifdef __APPLE__ "popq %%rbx" "\n\t" #endif "popq %%rbp" : "=a" (dummy) : "0" (myBp) /* * vmware can modify the whole VM state without the compiler knowing * it. --hpreg */ : #ifndef __APPLE__ /* %rbx is unchanged at the end of the function on Mac OS. */ "rbx", #endif "rcx", "rdx", "rsi", "rdi", "memory", "cc" ); } void BackdoorHbOut(Backdoor_proto_hb *myBp) // IN/OUT { uint64 dummy; __asm__ __volatile__( "pushq %%rbp" "\n\t" #ifdef __APPLE__ /* * Save %rbx on the stack because the Mac OS GCC doesn't want us to * clobber it - it erroneously thinks %rbx is the PIC register. 
* (Radar bug 7304232) */ "pushq %%rbx" "\n\t" #endif "pushq %%rax" "\n\t" "movq 48(%%rax), %%rbp" "\n\t" "movq 40(%%rax), %%rdi" "\n\t" "movq 32(%%rax), %%rsi" "\n\t" "movq 24(%%rax), %%rdx" "\n\t" "movq 16(%%rax), %%rcx" "\n\t" "movq 8(%%rax), %%rbx" "\n\t" "movq (%%rax), %%rax" "\n\t" "cld" "\n\t" "rep; outsb" "\n\t" "xchgq %%rax, (%%rsp)" "\n\t" "movq %%rbp, 48(%%rax)" "\n\t" "movq %%rdi, 40(%%rax)" "\n\t" "movq %%rsi, 32(%%rax)" "\n\t" "movq %%rdx, 24(%%rax)" "\n\t" "movq %%rcx, 16(%%rax)" "\n\t" "movq %%rbx, 8(%%rax)" "\n\t" "popq (%%rax)" "\n\t" #ifdef __APPLE__ "popq %%rbx" "\n\t" #endif "popq %%rbp" : "=a" (dummy) : "0" (myBp) : #ifndef __APPLE__ /* %rbx is unchanged at the end of the function on Mac OS. */ "rbx", #endif "rcx", "rdx", "rsi", "rdi", "memory", "cc" ); } #ifdef __cplusplus } #endif vmhgfs-only/hgfsBd.h 0000444 0000000 0000000 00000002720 13432725330 013374 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ #ifndef _HGFS_BD_H_ # define _HGFS_BD_H_ /* * hgfsBd.h -- * * Backdoor calls used by hgfs clients. 
*/ #include "rpcout.h" char *HgfsBd_GetBuf(void); char *HgfsBd_GetLargeBuf(void); void HgfsBd_PutBuf(char *); RpcOut *HgfsBd_GetChannel(void); Bool HgfsBd_CloseChannel(RpcOut *out); int HgfsBd_Dispatch(RpcOut *out, char *packetIn, size_t *packetSize, char const **packetOut); Bool HgfsBd_Enabled(RpcOut *out, char *requestPacket); Bool HgfsBd_OpenBackdoor(RpcOut **out); Bool HgfsBd_CloseBackdoor(RpcOut **out); #endif // _HGFS_BD_H_ vmhgfs-only/hgfsDevLinux.h 0000444 0000000 0000000 00000010644 13432725346 014620 0 ustar root root /********************************************************* * Copyright (C) 1998-2017 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsDev.h -- * * Header for code shared between the hgfs linux kernel module driver * and the pserver. */ #ifndef _HGFS_DEV_H_ #define _HGFS_DEV_H_ #include "vm_basic_types.h" #include "hgfs.h" #define HGFS_NAME "vmhgfs" // Name of FS (e.g. "mount -t vmhgfs") #define HGFS_FUSENAME "vmhgfs-fuse" // Name of FS (e.g. "-o subtype=vmhgfs-fuse") #define HGFS_FUSETYPE "fuse." HGFS_FUSENAME // Type of FS (e.g. "fuse.vmhgfs-fuse") #define HGFS_MOUNT_POINT "/mnt/hgfs" // Type of FS (e.g. 
vmhgfs-fuse ) #define HGFS_DEVICE_NAME "dev" // Name of our device under /proc/fs/HGFS_NAME/ #define HGFS_SUPER_MAGIC 0xbacbacbc // Superblock magic number #define HGFS_DEFAULT_TTL 1 // Default TTL for dentries typedef enum { HGFS_MOUNTINFO_VERSION_NONE, HGFS_MOUNTINFO_VERSION_1, HGFS_MOUNTINFO_VERSION_2, } HgfsMountInfoVersion; /* * The mount info flags. * These specify flags from options parsed on the mount command line. */ #define HGFS_MNTINFO_SERVER_INO (1 << 0) /* Use server inode numbers? */ /* * Mount information, passed from pserver process to kernel * at mount time. * * XXX: I'm hijacking this struct. In the future, when the Solaris HGFS driver * loses its pserver, the struct will be used by /sbin/mount.vmhgfs solely. * As is, it is also used by the Solaris pserver. */ typedef struct HgfsMountInfo { uint32 magicNumber; // hgfs magic number uint32 infoSize; // HgfsMountInfo structure size HgfsMountInfoVersion version; // HgfsMountInfo structure version uint32 fd; // file descriptor of client file uint32 flags; // hgfs specific mount flags #ifndef sun uid_t uid; // desired owner of files Bool uidSet; // is the owner actually set? gid_t gid; // desired group of files Bool gidSet; // is the group actually set? unsigned short fmask; // desired file mask unsigned short dmask; // desired directory mask uint32 ttl; // number of seconds before revalidating dentries #if defined __APPLE__ char shareNameHost[MAXPATHLEN]; // must be ".host" char shareNameDir[MAXPATHLEN]; // desired share name for mounting #else const char *shareNameHost; // must be ".host" const char *shareNameDir; // desired share name for mounting #endif #endif } #if __GNUC__ __attribute__((__packed__)) #else # error Compiler packing... #endif HgfsMountInfo; /* * Version 1 of the MountInfo object. * This is used so that newer kernel clients can allow mounts using * older versions of the mounter application for backwards compatibility. 
*/ typedef struct HgfsMountInfoV1 { uint32 magicNumber; // hgfs magic number uint32 version; // protocol version uint32 fd; // file descriptor of client file #ifndef sun uid_t uid; // desired owner of files Bool uidSet; // is the owner actually set? gid_t gid; // desired group of files Bool gidSet; // is the group actually set? unsigned short fmask; // desired file mask unsigned short dmask; // desired directory mask uint32 ttl; // number of seconds before revalidating dentries #if defined __APPLE__ char shareNameHost[MAXPATHLEN]; // must be ".host" char shareNameDir[MAXPATHLEN]; // desired share name for mounting #else const char *shareNameHost; // must be ".host" const char *shareNameDir; // desired share name for mounting #endif #endif } HgfsMountInfoV1; #endif //ifndef _HGFS_DEV_H_ vmhgfs-only/cpName.h 0000444 0000000 0000000 00000007574 13432725346 013425 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpName.h -- * * Cross-platform name format used by hgfs. 
* */ #ifndef __CP_NAME_H__ #define __CP_NAME_H__ #ifdef __KERNEL__ # include "driver-config.h" # include <linux/string.h> #elif defined __FreeBSD__ # if defined _KERNEL # include <sys/libkern.h> # define strchr(s,c) index(s,c) # else # include <string.h> # endif #elif defined __APPLE__ && defined KERNEL # include <string.h> #elif !defined sun # include <stdlib.h> # include <string.h> #endif #include "vm_basic_types.h" /* Status codes for processing share names */ typedef enum { HGFS_NAME_STATUS_COMPLETE, /* Name is complete */ HGFS_NAME_STATUS_FAILURE, /* Name processing failed */ HGFS_NAME_STATUS_INCOMPLETE_BASE, /* Name is base of namespace */ HGFS_NAME_STATUS_INCOMPLETE_ROOT, /* Name is "root" only */ HGFS_NAME_STATUS_INCOMPLETE_DRIVE, /* Name is "root drive" only */ HGFS_NAME_STATUS_INCOMPLETE_UNC, /* Name is "root unc" only */ HGFS_NAME_STATUS_INCOMPLETE_UNC_MACH, /* Name is "root unc <x>" only */ HGFS_NAME_STATUS_DOES_NOT_EXIST, /* Name does not exist */ HGFS_NAME_STATUS_ACCESS_DENIED, /* Desired access to share denied */ HGFS_NAME_STATUS_SYMBOLIC_LINK, /* Name contains a symbolic link */ HGFS_NAME_STATUS_OUT_OF_MEMORY, /* Out of memory while processing */ HGFS_NAME_STATUS_TOO_LONG, /* Name has overly long component */ HGFS_NAME_STATUS_NOT_A_DIRECTORY, /* Name has path component not a dir */ } HgfsNameStatus; int CPName_ConvertTo(char const *nameIn, // IN: The buf to convert size_t bufOutSize, // IN: The size of the output buffer char *bufOut); // OUT: The output buffer int CPName_LinuxConvertTo(char const *nameIn, // IN: buf to convert size_t bufOutSize, // IN: size of the output buffer char *bufOut); // OUT: output buffer int CPName_WindowsConvertTo(char const *nameIn, // IN: buf to convert size_t bufOutSize, // IN: size of the output buffer char *bufOut); // OUT: output buffer int CPName_ConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input buffer size_t *outSize, // IN/OUT: Size of output buffer char 
**bufOut); // IN/OUT: Output buffer HgfsNameStatus CPName_ConvertFromRoot(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buf char **bufOut); // IN/OUT: Output buffer int CPName_GetComponent(char const *begin, // IN: Beginning of buffer char const *end, // IN: End of buffer char const **next); // OUT: Next component char const * CPName_Print(char const *in, // IN: Name to print size_t size); // IN: Size of name #endif /* __CP_NAME_H__ */ vmhgfs-only/hgfsProto.h 0000444 0000000 0000000 00000257363 13432725346 014200 0 ustar root root /********************************************************* * Copyright (C) 1998-2018 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * hgfsProto.h -- * * Header file for data types and message formats used in the * Host/Guest File System (hgfs) protocol. */ #ifndef _HGFS_PROTO_H_ # define _HGFS_PROTO_H_ #define INCLUDE_ALLOW_USERLEVEL #define INCLUDE_ALLOW_MODULE #define INCLUDE_ALLOW_DISTRIBUTE #include "includeCheck.h" #include "vm_basic_types.h" #include "hgfs.h" /* * Handle used by the server to identify files and searches. Used * by the driver to match server replies with pending requests. 
*/ typedef uint32 HgfsHandle; #define HGFS_INVALID_HANDLE ((HgfsHandle)~((HgfsHandle)0)) /* * Opcodes for server operations. * * Changing the ordering of this enum will break the protocol; new ops * should be added at the end (but before HGFS_OP_MAX). */ typedef enum { HGFS_OP_OPEN, /* Open file */ HGFS_OP_READ, /* Read from file */ HGFS_OP_WRITE, /* Write to file */ HGFS_OP_CLOSE, /* Close file */ HGFS_OP_SEARCH_OPEN, /* Start new search */ HGFS_OP_SEARCH_READ, /* Get next search response */ HGFS_OP_SEARCH_CLOSE, /* End a search */ HGFS_OP_GETATTR, /* Get file attributes */ HGFS_OP_SETATTR, /* Set file attributes */ HGFS_OP_CREATE_DIR, /* Create new directory */ HGFS_OP_DELETE_FILE, /* Delete a file */ HGFS_OP_DELETE_DIR, /* Delete a directory */ HGFS_OP_RENAME, /* Rename a file or directory */ HGFS_OP_QUERY_VOLUME_INFO, /* Query volume information */ /* * The following operations are only available in version 2 of the hgfs * protocol. The corresponding version 1 opcodes above are deprecated. */ HGFS_OP_OPEN_V2, /* Open file */ HGFS_OP_GETATTR_V2, /* Get file attributes */ HGFS_OP_SETATTR_V2, /* Set file attributes */ HGFS_OP_SEARCH_READ_V2, /* Get next search response */ HGFS_OP_CREATE_SYMLINK, /* Create a symlink */ HGFS_OP_SERVER_LOCK_CHANGE, /* Change the oplock on a file */ HGFS_OP_CREATE_DIR_V2, /* Create a directory */ HGFS_OP_DELETE_FILE_V2, /* Delete a file */ HGFS_OP_DELETE_DIR_V2, /* Delete a directory */ HGFS_OP_RENAME_V2, /* Rename a file or directory */ /* * Operations for version 3, deprecating version 2 operations. 
*/ HGFS_OP_OPEN_V3, /* Open file */ HGFS_OP_READ_V3, /* Read from file */ HGFS_OP_WRITE_V3, /* Write to file */ HGFS_OP_CLOSE_V3, /* Close file */ HGFS_OP_SEARCH_OPEN_V3, /* Start new search */ HGFS_OP_SEARCH_READ_V3, /* Read V3 directory entries */ HGFS_OP_SEARCH_CLOSE_V3, /* End a search */ HGFS_OP_GETATTR_V3, /* Get file attributes */ HGFS_OP_SETATTR_V3, /* Set file attributes */ HGFS_OP_CREATE_DIR_V3, /* Create new directory */ HGFS_OP_DELETE_FILE_V3, /* Delete a file */ HGFS_OP_DELETE_DIR_V3, /* Delete a directory */ HGFS_OP_RENAME_V3, /* Rename a file or directory */ HGFS_OP_QUERY_VOLUME_INFO_V3, /* Query volume information */ HGFS_OP_CREATE_SYMLINK_V3, /* Create a symlink */ HGFS_OP_SERVER_LOCK_CHANGE_V3, /* Change the oplock on a file */ HGFS_OP_WRITE_WIN32_STREAM_V3, /* Write WIN32_STREAM_ID format data to file */ /* * Operations for version 4, deprecating version 3 operations. */ HGFS_OP_CREATE_SESSION_V4, /* Create a session and return host capabilities. */ HGFS_OP_DESTROY_SESSION_V4, /* Destroy/close session. */ HGFS_OP_READ_FAST_V4, /* Read */ HGFS_OP_WRITE_FAST_V4, /* Write */ HGFS_OP_SET_WATCH_V4, /* Start monitoring directory changes. */ HGFS_OP_REMOVE_WATCH_V4, /* Stop monitoring directory changes. */ HGFS_OP_NOTIFY_V4, /* Notification for a directory change event. */ HGFS_OP_SEARCH_READ_V4, /* Read V4 directory entries. */ HGFS_OP_OPEN_V4, /* Open file */ HGFS_OP_ENUMERATE_STREAMS_V4, /* Enumerate alternative named streams for a file. */ HGFS_OP_GETATTR_V4, /* Get file attributes */ HGFS_OP_SETATTR_V4, /* Set file attributes */ HGFS_OP_DELETE_V4, /* Delete a file or a directory */ HGFS_OP_LINKMOVE_V4, /* Rename/move/create hard link. */ HGFS_OP_FSCTL_V4, /* Sending FS control requests. */ HGFS_OP_ACCESS_CHECK_V4, /* Access check. */ HGFS_OP_FSYNC_V4, /* Flush all cached data to the disk. */ HGFS_OP_QUERY_VOLUME_INFO_V4, /* Query volume information. */ HGFS_OP_OPLOCK_ACQUIRE_V4, /* Acquire OPLOCK. 
*/ HGFS_OP_OPLOCK_BREAK_V4, /* Break or downgrade OPLOCK. */ HGFS_OP_LOCK_BYTE_RANGE_V4, /* Acquire byte range lock. */ HGFS_OP_UNLOCK_BYTE_RANGE_V4, /* Release byte range lock. */ HGFS_OP_QUERY_EAS_V4, /* Query extended attributes. */ HGFS_OP_SET_EAS_V4, /* Add or modify extended attributes. */ HGFS_OP_MAX, /* Dummy op, must be last in enum */ HGFS_OP_NEW_HEADER = 0xff, /* Header op, must be unique, distinguishes packet headers. */ } HgfsOp; /* * If we get to where the OP table has grown such that we hit the invalid opcode to * distinguish between header structures in the packet, then we must ensure that there * is no valid HGFS opcode with that same value. * The following assert is designed to force anyone who adds new opcodes which cause the * above condition to occur to verify the opcode values and then can remove this check. */ MY_ASSERTS(hgfsOpValuesAsserts, ASSERT_ON_COMPILE(HGFS_OP_MAX < HGFS_OP_NEW_HEADER); ) /* HGFS protocol versions. */ typedef enum { HGFS_PROTOCOL_VERSION_NONE, HGFS_PROTOCOL_VERSION_1, HGFS_PROTOCOL_VERSION_2, HGFS_PROTOCOL_VERSION_3, HGFS_PROTOCOL_VERSION_4, } HgfsProtocolVersion; /* XXX: Needs change when VMCI is supported. */ #define HGFS_REQ_PAYLOAD_SIZE_V3(hgfsReq) (sizeof *hgfsReq + sizeof(HgfsRequest)) #define HGFS_REP_PAYLOAD_SIZE_V3(hgfsRep) (sizeof *hgfsRep + sizeof(HgfsReply)) /* XXX: Needs change when VMCI is supported. */ #define HGFS_REQ_GET_PAYLOAD_V3(hgfsReq) ((char *)(hgfsReq) + sizeof(HgfsRequest)) #define HGFS_REP_GET_PAYLOAD_V3(hgfsRep) ((char *)(hgfsRep) + sizeof(HgfsReply)) /* * Open flags. * * Changing the order of this enum will break stuff. Do not add any flags to * this enum: it has been frozen and all new flags should be added to * HgfsOpenMode. This was done because HgfsOpenMode could still be converted * to a bitmask (so that it's easier to add flags to) whereas this enum was * already too large. 
*/ typedef enum { // File doesn't exist File exists HGFS_OPEN, // error HGFS_OPEN_EMPTY, // error size = 0 HGFS_OPEN_CREATE, // create HGFS_OPEN_CREATE_SAFE, // create error HGFS_OPEN_CREATE_EMPTY, // create size = 0 } HgfsOpenFlags; /* * Write flags. */ typedef uint8 HgfsWriteFlags; #define HGFS_WRITE_APPEND 1 /* * Permissions bits. * * These are intentionally similar to Unix permissions bits, and we * convert to/from Unix permissions using simple shift operations, so * don't change these or you will break things. */ typedef uint8 HgfsPermissions; #define HGFS_PERM_READ 4 #define HGFS_PERM_WRITE 2 #define HGFS_PERM_EXEC 1 /* * Access mode bits. * * Different operating systems have different set of file access mode. * Here are constants that are rich enough to describe all access modes in an OS * independent way. */ typedef uint32 HgfsAccessMode; /* * Generic access rights control coarse grain access for the file. * A particular generic rigth can be expanded into different set of specific rights * on different OS. */ /* * HGFS_MODE_GENERIC_READ means ability to read file data and read various file * attributes and properties. */ #define HGFS_MODE_GENERIC_READ (1 << 0) /* * HGFS_MODE_GENERIC_WRITE means ability to write file data and updaate various file * attributes and properties. */ #define HGFS_MODE_GENERIC_WRITE (1 << 1) /* * HGFS_MODE_GENERIC_EXECUE means ability to execute file. For network redirectors * ability to execute usualy implies ability to read data; for local file systems * HGFS_MODE_GENERIC_EXECUTE does not imply ability to read data. */ #define HGFS_MODE_GENERIC_EXECUTE (1 << 2) /* Specific rights define fine grain access modes. 
*/ #define HGFS_MODE_READ_DATA (1 << 3) // Ability to read file data #define HGFS_MODE_WRITE_DATA (1 << 4) // Ability to writge file data #define HGFS_MODE_APPEND_DATA (1 << 5) // Appending data to the end of file #define HGFS_MODE_DELETE (1 << 6) // Ability to delete the file #define HGFS_MODE_TRAVERSE_DIRECTORY (1 << 7) // Ability to access files in a directory #define HGFS_MODE_LIST_DIRECTORY (1 << 8) // Ability to list file names #define HGFS_MODE_ADD_SUBDIRECTORY (1 << 9) // Ability to create a new subdirectory #define HGFS_MODE_ADD_FILE (1 << 10) // Ability to create a new file #define HGFS_MODE_DELETE_CHILD (1 << 11) // Ability to delete file/subdirectory #define HGFS_MODE_READ_ATTRIBUTES (1 << 12) // Ability to read attributes #define HGFS_MODE_WRITE_ATTRIBUTES (1 << 13) // Ability to write attributes #define HGFS_MODE_READ_EXTATTRIBUTES (1 << 14) // Ability to read extended attributes #define HGFS_MODE_WRITE_EXTATTRIBUTES (1 << 15) // Ability to write extended attributes #define HGFS_MODE_READ_SECURITY (1 << 16) // Ability to read permissions/ACLs/owner #define HGFS_MODE_WRITE_SECURITY (1 << 17) // Ability to change permissions/ACLs #define HGFS_MODE_TAKE_OWNERSHIP (1 << 18) // Ability to change file owner/group /* * Server-side locking (oplocks and leases). * * The client can ask the server to acquire opportunistic locking/leasing * from the host FS on its behalf. This is communicated as part of an open request. * * HGFS_LOCK_OPPORTUNISTIC means that the client trusts the server * to decide what kind of locking to request from the host FS. * All other values tell the server explicitly the type of lock to * request. * * The server will attempt to acquire the desired lock and will notify the client * which type of lock was acquired as part of the reply to the open request. * Note that HGFS_LOCK_OPPORTUNISTIC should not be specified as the type of * lock acquired by the server, since HGFS_LOCK_OPPORTUNISTIC is not an * actual lock. 
*/ typedef enum { HGFS_LOCK_NONE, HGFS_LOCK_OPPORTUNISTIC, HGFS_LOCK_EXCLUSIVE, HGFS_LOCK_SHARED, HGFS_LOCK_BATCH, HGFS_LOCK_LEASE, } HgfsLockType; /* * Flags to indicate in a setattr request which fields should be * updated. Deprecated. */ typedef uint8 HgfsAttrChanges; #define HGFS_ATTR_SIZE (1 << 0) #define HGFS_ATTR_CREATE_TIME (1 << 1) #define HGFS_ATTR_ACCESS_TIME (1 << 2) #define HGFS_ATTR_WRITE_TIME (1 << 3) #define HGFS_ATTR_CHANGE_TIME (1 << 4) #define HGFS_ATTR_PERMISSIONS (1 << 5) #define HGFS_ATTR_ACCESS_TIME_SET (1 << 6) #define HGFS_ATTR_WRITE_TIME_SET (1 << 7) /* * Hints to indicate in a getattr or setattr which attributes * are valid for the request. * For setattr only, attributes should be set by host even if * no valid values are specified by the guest. */ typedef uint64 HgfsAttrHint; #define HGFS_ATTR_HINT_SET_ACCESS_TIME (1 << 0) #define HGFS_ATTR_HINT_SET_WRITE_TIME (1 << 1) #define HGFS_ATTR_HINT_USE_FILE_DESC (1 << 2) /* * Hint to determine using a name or a handle to determine * what to delete. */ typedef uint64 HgfsDeleteHint; #define HGFS_DELETE_HINT_USE_FILE_DESC (1 << 0) /* * Hint to determine using a name or a handle to determine * what to renames. */ typedef uint64 HgfsRenameHint; #define HGFS_RENAME_HINT_USE_SRCFILE_DESC (1 << 0) #define HGFS_RENAME_HINT_USE_TARGETFILE_DESC (1 << 1) #define HGFS_RENAME_HINT_NO_REPLACE_EXISTING (1 << 2) #define HGFS_RENAME_HINT_NO_COPY_ALLOWED (1 << 3) /* * File attributes. * * The four time fields below are in Windows NT format, which is in * units of 100ns since Jan 1, 1601, UTC. */ /* * Version 1 attributes. Deprecated. * Version 2 should be using HgfsAttrV2. */ typedef #include "vmware_pack_begin.h" struct HgfsAttr { HgfsFileType type; /* File type */ uint64 size; /* File size (in bytes) */ uint64 creationTime; /* Creation time. 
Ignored by POSIX */ uint64 accessTime; /* Time of last access */ uint64 writeTime; /* Time of last write */ uint64 attrChangeTime; /* Time file attributess were last * changed. Ignored by Windows */ HgfsPermissions permissions; /* Permissions bits */ } #include "vmware_pack_end.h" HgfsAttr; /* Various flags and Windows attributes. */ typedef uint64 HgfsAttrFlags; #define HGFS_ATTR_HIDDEN (1 << 0) #define HGFS_ATTR_SYSTEM (1 << 1) #define HGFS_ATTR_ARCHIVE (1 << 2) #define HGFS_ATTR_HIDDEN_FORCED (1 << 3) #define HGFS_ATTR_REPARSE_POINT (1 << 4) /* V4 additional definitions for hgfsAttrFlags. */ #define HGFS_ATTR_COMPRESSED (1 << 5) #define HGFS_ATTR_ENCRYPTED (1 << 6) #define HGFS_ATTR_OFFLINE (1 << 7) #define HGFS_ATTR_READONLY (1 << 8) #define HGFS_ATTR_SPARSE (1 << 9) #define HGFS_ATTR_TEMPORARY (1 << 10) #define HGFS_ATTR_SEQUENTIAL_ONLY (1 << 11) /* * Specifies which open request fields contain * valid values. */ typedef uint64 HgfsOpenValid; #define HGFS_OPEN_VALID_NONE 0 #define HGFS_OPEN_VALID_MODE (1 << 0) #define HGFS_OPEN_VALID_FLAGS (1 << 1) #define HGFS_OPEN_VALID_SPECIAL_PERMS (1 << 2) #define HGFS_OPEN_VALID_OWNER_PERMS (1 << 3) #define HGFS_OPEN_VALID_GROUP_PERMS (1 << 4) #define HGFS_OPEN_VALID_OTHER_PERMS (1 << 5) #define HGFS_OPEN_VALID_FILE_ATTR (1 << 6) #define HGFS_OPEN_VALID_ALLOCATION_SIZE (1 << 7) #define HGFS_OPEN_VALID_DESIRED_ACCESS (1 << 8) #define HGFS_OPEN_VALID_SHARE_ACCESS (1 << 9) #define HGFS_OPEN_VALID_SERVER_LOCK (1 << 10) #define HGFS_OPEN_VALID_FILE_NAME (1 << 11) /* V4 additional open mask flags. */ #define HGFS_OPEN_VALID_EA (1 << 12) #define HGFS_OPEN_VALID_ACL (1 << 13) #define HGFS_OPEN_VALID_STREAM_NAME (1 << 14) /* * Specifies which attribute fields contain * valid values. 
*/ typedef uint64 HgfsAttrValid; #define HGFS_ATTR_VALID_NONE 0 #define HGFS_ATTR_VALID_TYPE (1 << 0) #define HGFS_ATTR_VALID_SIZE (1 << 1) #define HGFS_ATTR_VALID_CREATE_TIME (1 << 2) #define HGFS_ATTR_VALID_ACCESS_TIME (1 << 3) #define HGFS_ATTR_VALID_WRITE_TIME (1 << 4) #define HGFS_ATTR_VALID_CHANGE_TIME (1 << 5) #define HGFS_ATTR_VALID_SPECIAL_PERMS (1 << 6) #define HGFS_ATTR_VALID_OWNER_PERMS (1 << 7) #define HGFS_ATTR_VALID_GROUP_PERMS (1 << 8) #define HGFS_ATTR_VALID_OTHER_PERMS (1 << 9) #define HGFS_ATTR_VALID_FLAGS (1 << 10) #define HGFS_ATTR_VALID_ALLOCATION_SIZE (1 << 11) #define HGFS_ATTR_VALID_USERID (1 << 12) #define HGFS_ATTR_VALID_GROUPID (1 << 13) #define HGFS_ATTR_VALID_FILEID (1 << 14) #define HGFS_ATTR_VALID_VOLID (1 << 15) /* * Add our file and volume identifiers. * NOTE: On Windows hosts, the file identifier is not guaranteed to be valid * particularly with FAT. A defrag operation could cause it to change. * Therefore, to not confuse older clients, and non-Windows * clients we have added a separate flag. * The Windows client will check for both flags for the * file ID, and return the information to the guest application. * However, it will use the ID internally, when it has an open * handle on the server. * Non-Windows clients need the file ID to be always guaranteed, * which is to say, that the ID remains constant over the course of the * file's lifetime, and will use the HGFS_ATTR_VALID_FILEID flag * only to determine if the ID is valid. */ #define HGFS_ATTR_VALID_NON_STATIC_FILEID (1 << 16) /* * File permissions that are in effect for the user which runs HGFS server. * Client needs to know effective permissions in order to implement access(2). * Client can't derive it from group/owner/other permissions because of two resaons: * 1. It does not know user/group id of the user which runs HGFS server * 2. Effective permissions account for additional restrictions that may be imposed * by host file system, for example by ACL. 
*/ #define HGFS_ATTR_VALID_EFFECTIVE_PERMS (1 << 17) #define HGFS_ATTR_VALID_EXTEND_ATTR_SIZE (1 << 18) #define HGFS_ATTR_VALID_REPARSE_POINT (1 << 19) #define HGFS_ATTR_VALID_SHORT_NAME (1 << 20) /* * Specifies which create dir request fields contain * valid values. */ typedef uint64 HgfsCreateDirValid; #define HGFS_CREATE_DIR_VALID_NONE 0 #define HGFS_CREATE_DIR_VALID_SPECIAL_PERMS (1 << 0) #define HGFS_CREATE_DIR_VALID_OWNER_PERMS (1 << 1) #define HGFS_CREATE_DIR_VALID_GROUP_PERMS (1 << 2) #define HGFS_CREATE_DIR_VALID_OTHER_PERMS (1 << 3) #define HGFS_CREATE_DIR_VALID_FILE_NAME (1 << 4) #define HGFS_CREATE_DIR_VALID_FILE_ATTR (1 << 5) /* * Version 2 of HgfsAttr */ typedef #include "vmware_pack_begin.h" struct HgfsAttrV2 { HgfsAttrValid mask; /* A bit mask to determine valid attribute fields */ HgfsFileType type; /* File type */ uint64 size; /* File size (in bytes) */ uint64 creationTime; /* Creation time. Ignored by POSIX */ uint64 accessTime; /* Time of last access */ uint64 writeTime; /* Time of last write */ uint64 attrChangeTime; /* Time file attributes were last * changed. Ignored by Windows */ HgfsPermissions specialPerms; /* Special permissions bits (suid, etc.). * Ignored by Windows */ HgfsPermissions ownerPerms; /* Owner permissions bits */ HgfsPermissions groupPerms; /* Group permissions bits. Ignored by * Windows */ HgfsPermissions otherPerms; /* Other permissions bits. Ignored by * Windows */ HgfsAttrFlags flags; /* Various flags and Windows 'attributes' */ uint64 allocationSize; /* Actual size of file on disk */ uint32 userId; /* User identifier, ignored by Windows */ uint32 groupId; /* group identifier, ignored by Windows */ uint64 hostFileId; /* File Id of the file on host: inode_t on Linux */ uint32 volumeId; /* volume identifier, non-zero is valid. */ uint32 effectivePerms; /* Permissions in effect for the user on the host. 
*/ uint64 reserved2; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsAttrV2; /* * Cross-platform filename representation * * Cross-platform (CP) names are represented by a string with each * path component separated by NULs, and terminated with a final NUL, * but with no leading path separator. * * For example, the representations of a POSIX and Windows name * are as follows, with "0" meaning NUL. * * Original name Cross-platform name * ----------------------------------------------------- * "/home/bac/temp" -> "home0bac0temp0" * "C:\temp\file.txt" -> "C0temp0file.txt0" * * Note that as in the example above, Windows should strip the colon * off of drive letters as part of the conversion. Aside from that, * all characters in each path component should be left unescaped and * unmodified. Each OS is responsible for escaping any characters that * are not legal in its filenames when converting FROM the CP name * format, and unescaping them when converting TO the CP name format. * * In some requests (OPEN, GETATTR, SETATTR, DELETE, CREATE_DIR) the * CP name is used to represent a particular file, but it is also used * to represent a search pattern for looking up files using * SEARCH_OPEN. * * In the current HGFS server implementation, each request has a minimum packet * size that must be met for it to be considered valid. This minimum is simply * the sizeof the particular request, which includes the solitary byte from the * HgfsFileName struct. For these particular requests, clients add an extra * byte to their payload size, without that byte being present anywhere. * * It isn't clear that this behavior is correct, but the end result is that * neither end malfunctions, as an extra byte gets sent by the client and is * ignored by the server. Unfortunately, it cannot be easily fixed. 
The * server's minimum packet size can be changed, but the client should continue * to send an extra byte, otherwise older servers with a slightly longer * minimum packet size may consider the new client's packets to be too short. * * UTF-8 representation * -------------------- * XXX: It is expected that file names in the HGFS protocol will be a valid UTF-8 * encoding. * See RFC 3629 (http://tools.ietf.org/html/rfc3629) * * Unicode Format * -------------- * HGFS protocol requests that contain file names as in the structure below, * should contain unicode normal form C (precomposed see explanation below) * characters therefore hosts such as Mac OS which * use HFS+ and unicode form D should convert names before * processing or sending HGFS requests. * * Precomposed (normal form C) versus Decomposed (normal form D) * ------------------------------------------------------------- * Certain Unicode characters can be encoded in more than one way. * For example, an (A acute) can be encoded either precomposed, * as U+00C1 (LATIN CAPITAL LETTER A WITH ACUTE), or decomposed, * as U+0041 U+0301 (LATIN CAPITAL LETTER A followed by a COMBINING ACUTE ACCENT). * Precomposed characters are more common in the Windows world, * whereas decomposed characters are more common on the Mac. * * See UAX 15 (http://unicode.org/reports/tr15/) */ typedef #include "vmware_pack_begin.h" struct HgfsFileName { uint32 length; /* Does NOT include terminating NUL */ char name[1]; } #include "vmware_pack_end.h" HgfsFileName; /* * Windows hosts only: the server may return the DOS 8 dot 3 format * name as part of the directory entry. */ typedef #include "vmware_pack_begin.h" struct HgfsShortFileName { uint32 length; /* Does NOT include terminating NUL */ char name[12 * 4]; /* UTF8 max char size is 4 bytes. */ } #include "vmware_pack_end.h" HgfsShortFileName; /* * Case-sensitiviy flags are only used when any lookup is * involved on the server side. 
*/ typedef enum { HGFS_FILE_NAME_DEFAULT_CASE, HGFS_FILE_NAME_CASE_SENSITIVE, HGFS_FILE_NAME_CASE_INSENSITIVE, } HgfsCaseType; /* * HgfsFileNameV3 - new header to incorporate case-sensitivity flags along with * Hgfs file handle. */ typedef #include "vmware_pack_begin.h" struct HgfsFileNameV3 { uint32 length; /* Does NOT include terminating NUL */ uint32 flags; /* Flags described below. */ HgfsCaseType caseType; /* Case-sensitivity type. */ HgfsHandle fid; char name[1]; } #include "vmware_pack_end.h" HgfsFileNameV3; /* * HgfsFileNameV3 flags. Case-sensitiviy flags are only used when any lookup is * involved on the server side. */ #define HGFS_FILE_NAME_USE_FILE_DESC (1 << 0) /* Case type ignored if set. */ /* * Request/reply structs. These are the first members of all * operation request and reply messages, respectively. */ typedef #include "vmware_pack_begin.h" struct HgfsRequest { HgfsHandle id; /* Opaque request ID used by the requestor */ HgfsOp op; } #include "vmware_pack_end.h" HgfsRequest; typedef #include "vmware_pack_begin.h" struct HgfsReply { HgfsHandle id; /* Opaque request ID used by the requestor */ HgfsStatus status; } #include "vmware_pack_end.h" HgfsReply; /* * Messages for our file operations. */ /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestOpen { HgfsRequest header; HgfsOpenMode mode; /* Which type of access is requested */ HgfsOpenFlags flags; /* Which flags to open the file with */ HgfsPermissions permissions; /* Which permissions to *create* a new file with */ HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestOpen; /* Version 2 of HgfsRequestOpen */ typedef #include "vmware_pack_begin.h" struct HgfsRequestOpenV2 { HgfsRequest header; HgfsOpenValid mask; /* Bitmask that specified which fields are valid. */ HgfsOpenMode mode; /* Which type of access requested. 
See desiredAccess */ HgfsOpenFlags flags; /* Which flags to open the file with */ HgfsPermissions specialPerms; /* Desired 'special' permissions for file creation */ HgfsPermissions ownerPerms; /* Desired 'owner' permissions for file creation */ HgfsPermissions groupPerms; /* Desired 'group' permissions for file creation */ HgfsPermissions otherPerms; /* Desired 'other' permissions for file creation */ HgfsAttrFlags attr; /* Attributes, if any, for file creation */ uint64 allocationSize; /* How much space to pre-allocate during creation */ uint32 desiredAccess; /* Extended support for windows access modes */ uint32 shareAccess; /* Windows only, share access modes */ HgfsLockType desiredLock; /* The type of lock desired by the client */ uint64 reserved1; /* Reserved for future use */ uint64 reserved2; /* Reserved for future use */ HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestOpenV2; /* Version 3 of HgfsRequestOpen */ typedef #include "vmware_pack_begin.h" struct HgfsRequestOpenV3 { HgfsOpenValid mask; /* Bitmask that specified which fields are valid. */ HgfsOpenMode mode; /* Which type of access requested. 
See desiredAccess */ HgfsOpenFlags flags; /* Which flags to open the file with */ HgfsPermissions specialPerms; /* Desired 'special' permissions for file creation */ HgfsPermissions ownerPerms; /* Desired 'owner' permissions for file creation */ HgfsPermissions groupPerms; /* Desired 'group' permissions for file creation */ HgfsPermissions otherPerms; /* Desired 'other' permissions for file creation */ HgfsAttrFlags attr; /* Attributes, if any, for file creation */ uint64 allocationSize; /* How much space to pre-allocate during creation */ uint32 desiredAccess; /* Extended support for windows access modes */ uint32 shareAccess; /* Windows only, share access modes */ HgfsLockType desiredLock; /* The type of lock desired by the client */ uint64 reserved1; /* Reserved for future use */ uint64 reserved2; /* Reserved for future use */ HgfsFileNameV3 fileName; } #include "vmware_pack_end.h" HgfsRequestOpenV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyOpen { HgfsReply header; HgfsHandle file; /* Opaque file ID used by the server */ } #include "vmware_pack_end.h" HgfsReplyOpen; /* Version 2 of HgfsReplyOpen */ typedef #include "vmware_pack_begin.h" struct HgfsReplyOpenV2 { HgfsReply header; HgfsHandle file; /* Opaque file ID used by the server */ HgfsLockType acquiredLock; /* The type of lock acquired by the server */ } #include "vmware_pack_end.h" HgfsReplyOpenV2; /* Version 3 of HgfsReplyOpen */ /* * The HGFS open V3 can acquire locks and reserve disk space when requested. * However, current versions of the server don't implement the locking or allocation of * disk space on a create. These results flags indicate to the client if the server * implements handling those fields and so the clients can respond accordingly. 
*/ typedef uint32 HgfsReplyOpenFlags; #define HGFS_OPEN_REPLY_ALLOC_DISK_SPACE (1 << 0) #define HGFS_OPEN_REPLY_LOCKED_FILE (1 << 1) typedef #include "vmware_pack_begin.h" struct HgfsReplyOpenV3 { HgfsHandle file; /* Opaque file ID used by the server */ HgfsLockType acquiredLock; /* The type of lock acquired by the server */ HgfsReplyOpenFlags flags; /* Opened file flags */ uint32 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyOpenV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestRead { HgfsRequest header; HgfsHandle file; /* Opaque file ID used by the server */ uint64 offset; uint32 requiredSize; } #include "vmware_pack_end.h" HgfsRequestRead; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyRead { HgfsReply header; uint32 actualSize; char payload[1]; } #include "vmware_pack_end.h" HgfsReplyRead; /* * Version 3 of HgfsRequestRead. * Server must support HGFS_LARGE_PACKET_MAX to implement this op. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestReadV3 { HgfsHandle file; /* Opaque file ID used by the server */ uint64 offset; uint32 requiredSize; uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsRequestReadV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyReadV3 { uint32 actualSize; uint64 reserved; /* Reserved for future use */ char payload[1]; } #include "vmware_pack_end.h" HgfsReplyReadV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestWrite { HgfsRequest header; HgfsHandle file; /* Opaque file ID used by the server */ HgfsWriteFlags flags; uint64 offset; uint32 requiredSize; char payload[1]; } #include "vmware_pack_end.h" HgfsRequestWrite; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyWrite { HgfsReply header; uint32 actualSize; } #include "vmware_pack_end.h" HgfsReplyWrite; /* * Version 3 of HgfsRequestWrite. * Server must support HGFS_LARGE_PACKET_MAX to implement this op. 
*/ typedef #include "vmware_pack_begin.h" struct HgfsRequestWriteV3 { HgfsHandle file; /* Opaque file ID used by the server */ HgfsWriteFlags flags; uint64 offset; uint32 requiredSize; uint64 reserved; /* Reserved for future use */ char payload[1]; } #include "vmware_pack_end.h" HgfsRequestWriteV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyWriteV3 { uint32 actualSize; uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyWriteV3; /* Stream write flags */ typedef enum { HGFS_WIN32_STREAM_IGNORE_SECURITY = (1<<0), } HgfsWin32StreamFlags; /* * HgfsRequestWriteWin32Stream. * Server must support HGFS_LARGE_PACKET_MAX to implement this op. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestWriteWin32StreamV3 { HgfsHandle file; /* Opaque file ID used by the server */ HgfsWin32StreamFlags flags; uint32 reserved1; uint32 requiredSize; uint64 reserved2; /* Reserved for future use */ char payload[1]; } #include "vmware_pack_end.h" HgfsRequestWriteWin32StreamV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyWriteWin32StreamV3 { uint32 actualSize; uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyWriteWin32StreamV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestClose { HgfsRequest header; HgfsHandle file; /* Opaque file ID used by the server */ } #include "vmware_pack_end.h" HgfsRequestClose; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyClose { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyClose; typedef #include "vmware_pack_begin.h" struct HgfsRequestCloseV3 { HgfsHandle file; /* Opaque file ID used by the server */ uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsRequestCloseV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyCloseV3 { uint64 reserved; } #include "vmware_pack_end.h" HgfsReplyCloseV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct 
HgfsRequestSearchOpen { HgfsRequest header; HgfsFileName dirName; } #include "vmware_pack_end.h" HgfsRequestSearchOpen; typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchOpenV3 { uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 dirName; } #include "vmware_pack_end.h" HgfsRequestSearchOpenV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplySearchOpen { HgfsReply header; HgfsHandle search; /* Opaque search ID used by the server */ } #include "vmware_pack_end.h" HgfsReplySearchOpen; typedef #include "vmware_pack_begin.h" struct HgfsReplySearchOpenV3 { HgfsHandle search; /* Opaque search ID used by the server */ uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplySearchOpenV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchRead { HgfsRequest header; HgfsHandle search; /* Opaque search ID used by the server */ uint32 offset; /* The first result is offset 0 */ } #include "vmware_pack_end.h" HgfsRequestSearchRead; /* Version 2 of HgfsRequestSearchRead */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchReadV2 { HgfsRequest header; HgfsHandle search; /* Opaque search ID used by the server */ uint32 offset; /* The first result is offset 0 */ } #include "vmware_pack_end.h" HgfsRequestSearchReadV2; typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchReadV3 { HgfsHandle search; /* Opaque search ID used by the server */ uint32 offset; /* The first result is offset 0 */ uint32 flags; /* Reserved for reading multiple directory entries. 
*/ uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsRequestSearchReadV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplySearchRead { HgfsReply header; HgfsAttr attr; HgfsFileName fileName; /* fileName.length = 0 means "no entry at this offset" */ } #include "vmware_pack_end.h" HgfsReplySearchRead; /* Version 2 of HgfsReplySearchRead */ typedef #include "vmware_pack_begin.h" struct HgfsReplySearchReadV2 { HgfsReply header; HgfsAttrV2 attr; /* * fileName.length = 0 means "no entry at this offset" * If the file is a symlink (as specified in attr) * this name is the name of the symlink, not the target. */ HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsReplySearchReadV2; /* Directory entry structure. */ typedef struct HgfsDirEntry { uint32 nextEntry; HgfsAttrV2 attr; /* * fileName.length = 0 means "no entry at this offset" * If the file is a symlink (as specified in attr) * this name is the name of the symlink, not the target. */ HgfsFileNameV3 fileName; } HgfsDirEntry; typedef #include "vmware_pack_begin.h" struct HgfsReplySearchReadV3 { uint64 count; /* Number of directory entries. */ uint64 reserved; /* Reserved for future use. */ char payload[1]; /* Directory entries. 
*/ } #include "vmware_pack_end.h" HgfsReplySearchReadV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchClose { HgfsRequest header; HgfsHandle search; /* Opaque search ID used by the server */ } #include "vmware_pack_end.h" HgfsRequestSearchClose; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplySearchClose { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplySearchClose; typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchCloseV3 { HgfsHandle search; /* Opaque search ID used by the server */ uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsRequestSearchCloseV3; typedef #include "vmware_pack_begin.h" struct HgfsReplySearchCloseV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplySearchCloseV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestGetattr { HgfsRequest header; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestGetattr; /* Version 2 of HgfsRequestGetattr */ typedef #include "vmware_pack_begin.h" struct HgfsRequestGetattrV2 { HgfsRequest header; HgfsAttrHint hints; /* Flags for file handle valid. */ HgfsHandle file; /* Opaque file ID used by the server. */ HgfsFileName fileName; /* Filename used when file handle invalid. */ } #include "vmware_pack_end.h" HgfsRequestGetattrV2; typedef #include "vmware_pack_begin.h" struct HgfsRequestGetattrV3 { HgfsAttrHint hints; /* Flags for file handle valid. */ uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 fileName; /* Filename used when file handle invalid. 
*/ } #include "vmware_pack_end.h" HgfsRequestGetattrV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyGetattr { HgfsReply header; HgfsAttr attr; } #include "vmware_pack_end.h" HgfsReplyGetattr; /* Version 2 of HgfsReplyGetattr */ typedef #include "vmware_pack_begin.h" struct HgfsReplyGetattrV2 { HgfsReply header; HgfsAttrV2 attr; /* * If the file is a symlink, as specified in attr.type, then this is * the target for the symlink. If the file is not a symlink, this should * be ignored. * * This filename is in "CPNameLite" format. See CPNameLite.c for details. */ HgfsFileName symlinkTarget; } #include "vmware_pack_end.h" HgfsReplyGetattrV2; typedef #include "vmware_pack_begin.h" struct HgfsReplyGetattrV3 { HgfsAttrV2 attr; /* * If the file is a symlink, as specified in attr.type, then this is * the target for the symlink. If the file is not a symlink, this should * be ignored. * * This filename is in "CPNameLite" format. See CPNameLite.c for details. */ uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 symlinkTarget; } #include "vmware_pack_end.h" HgfsReplyGetattrV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSetattr { HgfsRequest header; HgfsAttrChanges update; /* Which fields need to be updated */ HgfsAttr attr; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestSetattr; /* Version 2 of HgfsRequestSetattr */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSetattrV2 { HgfsRequest header; HgfsAttrHint hints; HgfsAttrV2 attr; HgfsHandle file; /* Opaque file ID used by the server. */ HgfsFileName fileName; /* Filename used when file handle invalid. */ } #include "vmware_pack_end.h" HgfsRequestSetattrV2; typedef #include "vmware_pack_begin.h" struct HgfsRequestSetattrV3 { HgfsAttrHint hints; HgfsAttrV2 attr; uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 fileName; /* Filename used when file handle invalid. 
*/ } #include "vmware_pack_end.h" HgfsRequestSetattrV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplySetattr { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplySetattr; /* Version 2 of HgfsReplySetattr */ typedef #include "vmware_pack_begin.h" struct HgfsReplySetattrV2 { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplySetattrV2; typedef #include "vmware_pack_begin.h" struct HgfsReplySetattrV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplySetattrV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestCreateDir { HgfsRequest header; HgfsPermissions permissions; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestCreateDir; /* Version 2 of HgfsRequestCreateDir */ typedef #include "vmware_pack_begin.h" struct HgfsRequestCreateDirV2 { HgfsRequest header; HgfsCreateDirValid mask; HgfsPermissions specialPerms; HgfsPermissions ownerPerms; HgfsPermissions groupPerms; HgfsPermissions otherPerms; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestCreateDirV2; /* Version 3 of HgfsRequestCreateDir */ typedef #include "vmware_pack_begin.h" struct HgfsRequestCreateDirV3 { HgfsCreateDirValid mask; HgfsPermissions specialPerms; HgfsPermissions ownerPerms; HgfsPermissions groupPerms; HgfsPermissions otherPerms; HgfsAttrFlags fileAttr; HgfsFileNameV3 fileName; } #include "vmware_pack_end.h" HgfsRequestCreateDirV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyCreateDir { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyCreateDir; /* Version 2 of HgfsReplyCreateDir */ typedef #include "vmware_pack_begin.h" struct HgfsReplyCreateDirV2 { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyCreateDirV2; /* Version 3 of HgfsReplyCreateDir */ typedef #include "vmware_pack_begin.h" struct HgfsReplyCreateDirV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyCreateDirV3; /* 
Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsRequestDelete { HgfsRequest header; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestDelete; /* Version 2 of HgfsRequestDelete */ typedef #include "vmware_pack_begin.h" struct HgfsRequestDeleteV2 { HgfsRequest header; HgfsDeleteHint hints; HgfsHandle file; /* Opaque file ID used by the server. */ HgfsFileName fileName; /* Name used if the file is HGFS_HANDLE_INVALID */ } #include "vmware_pack_end.h" HgfsRequestDeleteV2; /* Version 3 of HgfsRequestDelete */ typedef #include "vmware_pack_begin.h" struct HgfsRequestDeleteV3 { HgfsDeleteHint hints; uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 fileName; /* Name used if the file is HGFS_HANDLE_INVALID */ } #include "vmware_pack_end.h" HgfsRequestDeleteV3; /* Deprecated */ typedef #include "vmware_pack_begin.h" struct HgfsReplyDelete { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyDelete; /* Version 2 of HgfsReplyDelete */ typedef #include "vmware_pack_begin.h" struct HgfsReplyDeleteV2 { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyDeleteV2; /* Version 2 of HgfsReplyDelete */ typedef #include "vmware_pack_begin.h" struct HgfsReplyDeleteV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyDeleteV3; /* * The size of the HgfsFileName struct is variable depending on the * length of the name, so you can't use request->newName to get the * actual address of the new name, because where it starts is * dependant on how long the oldName is. 
To get the address of * newName, use this: * * &oldName + sizeof(HgfsFileName) + oldName.length */ typedef #include "vmware_pack_begin.h" struct HgfsRequestRename { HgfsRequest header; HgfsFileName oldName; HgfsFileName newName; } #include "vmware_pack_end.h" HgfsRequestRename; typedef #include "vmware_pack_begin.h" struct HgfsReplyRename { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyRename; typedef #include "vmware_pack_begin.h" struct HgfsRequestRenameV2 { HgfsRequest header; HgfsRenameHint hints; HgfsHandle srcFile; /* Opaque file ID to "old name" used by the server. */ HgfsHandle targetFile; /* Opaque file ID to "old name" used by the server. */ HgfsFileName oldName; HgfsFileName newName; } #include "vmware_pack_end.h" HgfsRequestRenameV2; typedef #include "vmware_pack_begin.h" struct HgfsReplyRenameV2 { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplyRenameV2; /* HgfsRequestRename and HgfsReplyRename for v3. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestRenameV3 { HgfsRenameHint hints; uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 oldName; HgfsFileNameV3 newName; } #include "vmware_pack_end.h" HgfsRequestRenameV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyRenameV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyRenameV3; typedef #include "vmware_pack_begin.h" struct HgfsRequestQueryVolume { HgfsRequest header; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestQueryVolume; typedef #include "vmware_pack_begin.h" struct HgfsReplyQueryVolume { HgfsReply header; uint64 freeBytes; uint64 totalBytes; } #include "vmware_pack_end.h" HgfsReplyQueryVolume; /* HgfsRequestQueryVolume and HgfsReplyQueryVolume for v3. 
*/ typedef #include "vmware_pack_begin.h" struct HgfsRequestQueryVolumeV3 { uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 fileName; } #include "vmware_pack_end.h" HgfsRequestQueryVolumeV3; typedef #include "vmware_pack_begin.h" struct HgfsReplyQueryVolumeV3 { uint64 freeBytes; uint64 totalBytes; uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplyQueryVolumeV3; /* New operations for Version 2 */ typedef #include "vmware_pack_begin.h" struct HgfsRequestServerLockChange { HgfsRequest header; HgfsHandle file; HgfsLockType newServerLock; } #include "vmware_pack_end.h" HgfsRequestServerLockChange; typedef #include "vmware_pack_begin.h" struct HgfsReplyServerLockChange { HgfsReply header; HgfsLockType serverLock; } #include "vmware_pack_end.h" HgfsReplyServerLockChange; typedef #include "vmware_pack_begin.h" struct HgfsRequestSymlinkCreate { HgfsRequest header; HgfsFileName symlinkName; /* This filename is in "CPNameLite" format. See CPNameLite.c for details. */ HgfsFileName targetName; } #include "vmware_pack_end.h" HgfsRequestSymlinkCreate; typedef #include "vmware_pack_begin.h" struct HgfsReplySymlinkCreate { HgfsReply header; } #include "vmware_pack_end.h" HgfsReplySymlinkCreate; /* HgfsRequestSymlinkCreate and HgfsReplySymlinkCreate for v3. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSymlinkCreateV3 { uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 symlinkName; /* This filename is in "CPNameLite" format. See CPNameLite.c for details. */ HgfsFileNameV3 targetName; } #include "vmware_pack_end.h" HgfsRequestSymlinkCreateV3; typedef #include "vmware_pack_begin.h" struct HgfsReplySymlinkCreateV3 { uint64 reserved; /* Reserved for future use */ } #include "vmware_pack_end.h" HgfsReplySymlinkCreateV3; /* HGFS protocol version 4 definitions. 
*/ #define HGFS_HEADER_VERSION_1 1 #define HGFS_HEADER_VERSION HGFS_HEADER_VERSION_1 /* * Flags to indicate the type of packet following the header and * the overall state of the operation. */ #define HGFS_PACKET_FLAG_REQUEST (1 << 0) // Request packet #define HGFS_PACKET_FLAG_REPLY (1 << 1) // Reply packet #define HGFS_PACKET_FLAG_INFO_EXTERROR (1 << 2) // Info has ext error #define HGFS_PACKET_FLAG_VALID_FLAGS (0x7) // Mask for valid values typedef #include "vmware_pack_begin.h" struct HgfsHeader { uint8 version; /* Header version. */ uint8 reserved1[3]; /* Reserved for future use. */ HgfsOp dummy; /* Needed to distinguish between older and newer header. */ uint32 packetSize; /* Size of the packet, including the header size. */ uint32 headerSize; /* Size of the Hgfs header. */ uint32 requestId; /* Request ID. */ HgfsOp op; /* Operation. */ uint32 status; /* Return value. */ uint32 flags; /* Flags. See above. */ uint32 information; /* Generic field, used e.g. for native error code. */ uint64 sessionId; /* Session ID. */ uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsHeader; typedef uint32 HgfsOpCapFlags; /* * The operation capability flags. * * These flags apply to all operations and occupy the least significant * 16 bits of the HgfsOpCapFlags type. */ /* * HGFS_OP_CAPFLAG_NOT_SUPPORTED * If no flags are set then the capability is not supported by the host. */ #define HGFS_OP_CAPFLAG_NOT_SUPPORTED 0 /* * HGFS_OP_CAPFLAG_IS_SUPPORTED * Set for each request that is supported by a host or client. * To be set for an Hgfs session both host and client must have the capability. */ #define HGFS_OP_CAPFLAG_IS_SUPPORTED (1 << 0) /* * HGFS_OP_CAPFLAG_ASYNCHRONOUS * Set for each request that can be handled asynchronously by a host or client. * By default all operations are handled synchronously but if this flag is set * by a client and a host then the operation can be handled in an asynchronous manner too. 
*/ #define HGFS_OP_CAPFLAG_ASYNCHRONOUS (1 << 1) /* * The operation specific capability flags. * * These flags apply only to the operation given by the name and occupy the * most significant 16 bits of the HgfsOpCapFlags type. */ /* * Following flags define which optional parameters for file open * requests are supported by the host. * HGFS_OP_CAPFLAG_OPENV4_EA - host is capable of setting EA when creating * a new file. * HGFS_OP_CAPFLAG_OPENV4_ACL - host is capable of setting ACLs when creating * a new file. * HGFS_OP_CAPFLAG_OPENV4_NAMED_STREAMS - opening/enumerating named streams * is supported. * HGFS_OP_CAPFLAG_OPENV4_SHARED_ACCESS - host supports file sharing restrictions. * HGFS_OP_CAPFLAG_OPENV4_UNIX_PERMISSIONS - host stores POSIX permissions with * file. * HGFS_OP_CAPFLAG_OPENV4_POSIX_DELETION - host supports POSIX file deletion semantics. */ #define HGFS_OP_CAPFLAG_OPENV4_EA (1 << 16) #define HGFS_OP_CAPFLAG_OPENV4_ACL (1 << 17) #define HGFS_OP_CAPFLAG_OPENV4_NAMED_STREAMS (1 << 18) #define HGFS_OP_CAPFLAG_OPENV4_SHARED_ACCESS (1 << 19) #define HGFS_OP_CAPFLAG_OPENV4_UNIX_PERMISSIONS (1 << 20) #define HGFS_OP_CAPFLAG_OPENV4_POSIX_DELETION (1 << 21) /* * There is a significant difference in byte range locking semantics between Windows * and POSIX file systems. Windows implements mandatory locking which means that every * read or write request that conflicts with byte range locks is rejected. POSIX has * an advisory locking which means that locks are validated only when another lock is * requested and are not enforced for read/write operations. * Applications in guest OS may expect byte range locking semantics that matches guest * OS which may be different from semantics that is natively supported by host OS. In * this case either HGFS server or HGFS client should provide compensation for the host * OS semantics to maintain application compatibility. 
* Client must know if the server is capable to provide appropriate byte range locking * semantics to perform some compensation on behalf of server when necessary. * * Following flags define various capabilities of byte range lock implementation on * the host. * * HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_64 means that server is capable of locking 64 bit * length ranges. * HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_32 means that server is limited to 32-bit ranges. * HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_MANDATORY means that server is capable of enforcing * read/write restrictions for locked ranges. * HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_ADVISORY means that server supports advisory locking; * locks are validated only for other bytes * range locking and are not enforced * for read/write operations. */ #define HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_64 (1 << 16) #define HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_32 (1 << 17) #define HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_MANDATORY (1 << 18) #define HGFS_OP_CAPFLAG_BYTE_RANGE_LOCKS_ADVISORY (1 << 19) /* HGFS_SUPPORTS_HARD_LINKS is set when the host supports hard links. */ #define HGFS_OP_CAPFLAG_LINKMOVE_HARD_LINKS (1 << 16) /* * HGFS_SET_WATCH_SUPPORTS_FINE_GRAIN_EVENTS is set when host supports * fine grain event reporting for directory notification. */ #define HGFS_OP_CAPFLAG_SET_WATCH_FINE_GRAIN_EVENTS (1 << 16) typedef #include "vmware_pack_begin.h" struct HgfsOpCapability { HgfsOp op; /* Op. */ HgfsOpCapFlags flags; /* Flags. */ } #include "vmware_pack_end.h" HgfsOpCapability; typedef HgfsFileName HgfsUserName; typedef HgfsFileName HgfsGroupName; /* Following structures describe user identity on the host which runs HGFS service. */ typedef #include "vmware_pack_begin.h" struct HgfsIdentity { uint32 uid; /* user id. */ uint32 gid; /* Primary group id. */ HgfsUserName user; /* User name in form specified in RFC 3530. */ HgfsGroupName group; /* Group name in form specified in RFC 3530. 
*/ } #include "vmware_pack_end.h" HgfsIdentity; #define HGFS_INVALID_SESSION_ID (~((uint64)0)) /* * The HGFS session flags. These determine the state and validity of the session * information. * It is envisaged that flags will be set for notifying the clients of file system * feature support that transcend multiple request types i.e., HGFS opcodes. */ typedef uint32 HgfsSessionFlags; #define HGFS_SESSION_MAXPACKETSIZE_VALID (1 << 0) #define HGFS_SESSION_CHANGENOTIFY_ENABLED (1 << 1) #define HGFS_SESSION_OPLOCK_ENABLED (1 << 2) typedef #include "vmware_pack_begin.h" struct HgfsRequestCreateSessionV4 { uint32 numCapabilities; /* Number of capabilities to follow. */ uint32 maxPacketSize; /* Maximum packet size supported. */ HgfsSessionFlags flags; /* Session capability flags. */ uint32 reserved; /* Reserved for future use. */ HgfsOpCapability capabilities[1]; /* Array of HgfsCapabilities. */ } #include "vmware_pack_end.h" HgfsRequestCreateSessionV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyCreateSessionV4 { uint64 sessionId; /* Session ID. */ uint32 numCapabilities; /* Number of capabilities to follow. */ uint32 maxPacketSize; /* Maximum packet size supported. */ uint32 identityOffset; /* Offset to HgfsIdentity or 0 if no identity. */ HgfsSessionFlags flags; /* Flags. */ uint32 reserved; /* Reserved for future use. */ HgfsOpCapability capabilities[1]; /* Array of HgfsCapabilities. */ } #include "vmware_pack_end.h" HgfsReplyCreateSessionV4; typedef #include "vmware_pack_begin.h" struct HgfsRequestDestroySessionV4 { uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsRequestDestroySessionV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyDestroySessionV4 { uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsReplyDestroySessionV4; /* Adds new error status: HGFS_STATUS_INVALID_SESSION. 
*/ /* * If file handle is used to set watch (HGFS_FILE_NAME_USE_FILE_DESC * is set in the fileName), closing this handle implicitly removes the watch. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestSetWatchV4 { uint64 events; /* What events to watch? */ uint32 flags; /* Flags. */ uint64 reserved; /* Reserved for future use. */ HgfsFileNameV3 fileName; /* Filename to watch. */ } #include "vmware_pack_end.h" HgfsRequestSetWatchV4; /* * Coarse grain notification event types. */ #define HGFS_ACTION_ADDED (1 << 0) /* File was added. */ #define HGFS_ACTION_REMOVED (1 << 1) /* File was removed. */ #define HGFS_ACTION_MODIFIED (1 << 2) /* File attributes were changed. */ #define HGFS_ACTION_RENAMED (1 << 3) /* File was renamed. */ /* * Fine grain notification event types. * HgfsRequestSetWatch events. */ #define HGFS_NOTIFY_ACCESS (1 << 0) /* File accessed (read) */ #define HGFS_NOTIFY_ATTRIB (1 << 1) /* File attributes changed. */ #define HGFS_NOTIFY_SIZE (1 << 2) /* File size changed. */ #define HGFS_NOTIFY_ATIME (1 << 3) /* Access time changed. */ #define HGFS_NOTIFY_MTIME (1 << 4) /* Modification time changed. */ #define HGFS_NOTIFY_CTIME (1 << 5) /* Attribute time changed. */ #define HGFS_NOTIFY_CRTIME (1 << 6) /* Creation time changed. */ #define HGFS_NOTIFY_NAME (1 << 7) /* File / Directory name. */ #define HGFS_NOTIFY_OPEN (1 << 8) /* File opened */ #define HGFS_NOTIFY_CLOSE_WRITE (1 << 9) /* Modified file closed. */ #define HGFS_NOTIFY_CLOSE_NOWRITE (1 << 10) /* Non-modified file closed. */ #define HGFS_NOTIFY_CREATE_FILE (1 << 11) /* File created */ #define HGFS_NOTIFY_CREATE_DIR (1 << 12) /* Directory created */ #define HGFS_NOTIFY_DELETE_FILE (1 << 13) /* File deleted */ #define HGFS_NOTIFY_DELETE_DIR (1 << 14) /* Directory deleted */ #define HGFS_NOTIFY_DELETE_SELF (1 << 15) /* Watched directory deleted */ #define HGFS_NOTIFY_MODIFY (1 << 16) /* File modified. */ #define HGFS_NOTIFY_MOVE_SELF (1 << 17) /* Watched directory moved. 
*/ #define HGFS_NOTIFY_OLD_FILE_NAME (1 << 18) /* Rename: old file name. */ #define HGFS_NOTIFY_NEW_FILE_NAME (1 << 19) /* Rename: new file name. */ #define HGFS_NOTIFY_OLD_DIR_NAME (1 << 20) /* Rename: old dir name. */ #define HGFS_NOTIFY_NEW_DIR_NAME (1 << 21) /* Rename: new dir name. */ #define HGFS_NOTIFY_CHANGE_EA (1 << 22) /* Extended attributes. */ #define HGFS_NOTIFY_CHANGE_SECURITY (1 << 23) /* Security/permissions. */ #define HGFS_NOTIFY_ADD_STREAM (1 << 24) /* Named stream created. */ #define HGFS_NOTIFY_DELETE_STREAM (1 << 25) /* Named stream deleted. */ #define HGFS_NOTIFY_CHANGE_STREAM_SIZE (1 << 26) /* Named stream size changed. */ #define HGFS_NOTIFY_CHANGE_STREAM_LAST_WRITE (1 << 27) /* Stream timestamp changed. */ #define HGFS_NOTIFY_WATCH_DELETED (1 << 28) /* Dir with watch deleted. */ #define HGFS_NOTIFY_EVENTS_DROPPED (1 << 29) /* Notifications dropped. */ /* HgfsRequestSetWatch flags. */ #define HGFS_NOTIFY_FLAG_WATCH_TREE (1 << 0) /* Watch the entire directory tree. */ #define HGFS_NOTIFY_FLAG_DONT_FOLLOW (1 << 1) /* Don't follow symlinks. */ #define HGFS_NOTIFY_FLAG_ONE_SHOT (1 << 2) /* Generate only one notification. */ #define HGFS_NOTIFY_FLAG_POSIX_HINT (1 << 3) /* Client is POSIX and thus expects * fine grain notification. Server * may provide coarse grain * notification even if this flag is * set. */ typedef uint64 HgfsSubscriberHandle; #define HGFS_INVALID_SUBSCRIBER_HANDLE ((HgfsSubscriberHandle)~((HgfsSubscriberHandle)0)) typedef #include "vmware_pack_begin.h" struct HgfsReplySetWatchV4 { HgfsSubscriberHandle watchId; /* Watch identifier for subsequent references. */ uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsReplySetWatchV4; typedef #include "vmware_pack_begin.h" struct HgfsRequestRemoveWatchV4 { HgfsSubscriberHandle watchId; /* Watch identifier to remove. 
*/
}
#include "vmware_pack_end.h"
HgfsRequestRemoveWatchV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsReplyRemoveWatchV4 {
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsReplyRemoveWatchV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsNotifyEventV4 {
   uint32 nextOffset;            /* Offset of next event; 0 if it is the last one. */
   uint64 mask;                  /* Event occurred. */
   uint64 reserved;              /* Reserved for future use. */
   HgfsFileName fileName;        /* Filename. */
}
#include "vmware_pack_end.h"
HgfsNotifyEventV4;

/* Too many events, some or all events were dropped by the server. */
#define HGFS_NOTIFY_FLAG_OVERFLOW (1 << 0)
/* Watch had been removed either explicitly or implicitly. */
#define HGFS_NOTIFY_FLAG_REMOVED (1 << 1)
/* Server generated coarse grain events. */
#define HGFS_NOTIFY_FLAG_COARSE_GRAIN (1 << 2)

typedef
#include "vmware_pack_begin.h"
struct HgfsRequestNotifyV4 {
   HgfsSubscriberHandle watchId; /* Watch identifier. */
   uint32 flags;                 /* Various flags. */
   uint32 count;                 /* Number of events that occurred. */
   uint64 reserved;              /* Reserved for future use. */
   HgfsNotifyEventV4 events[1];  /* Events. HgfsNotifyEvent(s). */
}
#include "vmware_pack_end.h"
HgfsRequestNotifyV4;

// Query EA flags values.
#define HGFS_QUERY_EA_INDEX_SPECIFIED (1 << 0)
#define HGFS_QUERY_EA_SINGLE_ENTRY (1 << 1)
#define HGFS_QUERY_EA_RESTART_SCAN (1 << 2)

typedef
#include "vmware_pack_begin.h"
struct HgfsRequestQueryEAV4 {
   uint32 flags;                 /* EA flags. */
   uint32 index;
   uint64 reserved;              /* Reserved for future use. */
   uint32 eaNameLength;          /* EA name length. */
   uint32 eaNameOffset;          /* Offset of the eaName field. */
   HgfsFileNameV3 fileName;      /* File to watch. */
   char eaNames[1];              /* List of NULL terminated EA names.
                                  * Actual location of the data depends on
                                  * fileName length and defined by eaNameOffset.
*/
}
#include "vmware_pack_end.h"
HgfsRequestQueryEAV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsReplyQueryEAV4 {
   uint32 nextOffset;            /* Offset of the next structure when more than
                                  * one record is returned. */
   uint32 flags;                 /* EA flags. */
   uint32 index;                 /* Index needed to resume scan. */
   uint64 reserved;              /* Reserved for future use. */
   uint32 eaDataLength;          /* EA value length. */
   char eaData[1];               /* NULL terminated EA name followed by EA value. */
}
#include "vmware_pack_end.h"
HgfsReplyQueryEAV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsEAV4 {
   uint32 nextOffset;            /* Offset of the next structure in the chain. */
   uint32 valueLength;           /* EA value length. */
   char data[1];                 /* NULL terminated EA name followed by EA value. */
}
#include "vmware_pack_end.h"
HgfsEAV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsRequestSetEAV4 {
   uint32 flags;                 /* Flags, see below. */
   uint64 reserved;              /* Reserved for future use. */
   uint32 numEAs;                /* Number of EAs in this request. */
   HgfsEAV4 attributes[1];       /* Array of attributes. */
}
#include "vmware_pack_end.h"
HgfsRequestSetEAV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsReplySetEAV4 {
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsReplySetEAV4;

/*
 * EA Flags. When both flags are set EA is either created or replaced if it exists.
 * HGFS_EA_FLAG_CREATE - create if EA is not present, error otherwise.
 * HGFS_EA_FLAG_REPLACE - Replace existing EA. Error if EA not already present.
 */
#define HGFS_EA_FLAG_CREATE (1 << 0)
#define HGFS_EA_FLAG_REPLACE (1 << 1)

/*
 * Byte range lock flag values:
 * HGFS_RANGE_LOCK_EXCLUSIVE - Requested lock is exclusive when this flag is set,
 *                             otherwise it is a shared lock.
 * HGFS_RANGE_LOCK_FAIL_IMMEDIATLY - If the flag is not set server waits until the
 *                                   lock becomes available.
*/
#define HGFS_RANGE_LOCK_EXCLUSIVE (1 << 0)
#define HGFS_RANGE_LOCK_FAIL_IMMEDIATLY (1 << 1)

typedef
#include "vmware_pack_begin.h"
struct HgfsRequestLockRangeV4 {
   HgfsHandle fid;               /* File to take lock on. */
   uint32 flags;                 /* Various flags. */
   uint64 start;                 /* Starting offset in the file. */
   uint64 length;                /* Number of bytes to lock. */
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsRequestLockRangeV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsReplyLockRangeV4 {
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsReplyLockRangeV4;

#define HGFS_RANGE_LOCK_UNLOCK_ALL (1 << 0)

typedef
#include "vmware_pack_begin.h"
struct HgfsRequestUnlockRangeV4 {
   HgfsHandle fid;               /* File to release lock on. */
   uint32 flags;                 /* Various flags. */
   uint64 start;                 /* Starting offset in the file. */
   uint64 length;                /* Number of bytes to unlock. */
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsRequestUnlockRangeV4;

typedef
#include "vmware_pack_begin.h"
struct HgfsReplyUnlockRangeV4 {
   uint64 reserved;              /* Reserved for future use. */
}
#include "vmware_pack_end.h"
HgfsReplyUnlockRangeV4;

/*
 * There are three types of oplocks: level 1, batch, and level 2. Both the level 1 and
 * batch oplocks are "exclusive access" opens. They are used slightly differently,
 * however, and hence have somewhat different semantics. A level 2 oplock is a "shared
 * access" grant on the file.
 * Level 1 is used by a remote client that wishes to modify the data. Once granted a
 * Level 1 oplock, the remote client may cache the data, modify the data in its cache
 * and need not write it back to the server immediately.
 * Batch oplocks are used by remote clients for accessing script files where the file is
 * opened, read or written, and then closed repeatedly.
Thus, a batch oplock * corresponds not to a particular application opening the file, but rather to a remote * clients network file system caching the file because it knows something about the * semantics of the given file access. The name "batch" comes from the fact that this * behavior was observed by Microsoft with "batch files" being processed by command line * utilities. Log files especially exhibit this behavior when a script it being * processed each command is executed in turn. If the output of the script is redirected * to a log file the file fits the pattern described earlier, namely open/write/close. * With many lines in a file this pattern can be repeated hundreds of times. * Level 2 is used by a remote client that merely wishes to read the data. Once granted * a Level 2 oplock, the remote client may cache the data and need not worry that the * data on the remote file server will change without it being advised of that change. * An oplock must be broken whenever the cache consistency guarantee provided by the * oplock can no longer be provided. Thus, whenever a second network client attempts to * access data in the same file across the network, the file server is responsible for * "breaking" the oplocks and only then allowing the remote client to access the file. * This ensures that the data is guaranteed to be consistent and hence we have preserved * the consistency guarantees essential to proper operation. * * HGFS_OPLOCK_NONE: no oplock. No caching on client side. * HGFS_OPLOCK_SHARED: shared (or LEVEL II) oplock. Read caching is allowed. * HGFS_OPLOCK_EXCLUSIVE: exclusive (or LEVEL I) oplock. Read/write caching is allowed. * HGFS_OPLOCK_BATCH: batch oplock. Read/Write and Open caching is allowed. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestServerLockChangeV2 { HgfsHandle fid; /* File to take lock on. */ HgfsLockType serverLock; /* Lock type. 
*/ uint64 reserved; } #include "vmware_pack_end.h" HgfsRequestServerLockChangeV2; typedef #include "vmware_pack_begin.h" struct HgfsReplyServerLockChangeV2 { HgfsLockType serverLock; /* Lock granted. */ uint64 reserved; } #include "vmware_pack_end.h" HgfsReplyServerLockChangeV2; /* * This request is sent from server to the client to notify that oplock * is revoked or downgraded. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestOplockBreakV4 { HgfsHandle fid; /* File handle. */ HgfsLockType serverLock; /* Lock downgraded to this type. */ uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsRequestOplockBreakV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyOplockBreakV4 { HgfsHandle fid; /* File handle. */ HgfsLockType serverLock; /* Lock type. */ uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsReplyOplockBreakV4; /* * Flusing of a whole volume is not supported. * Flusing of reqular files is supported on all hosts. * Flusing of directories is supproted on POSIX hosts and is * NOOP on Windows hosts. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestFsyncV4 { HgfsHandle fid; /* File to sync. */ uint64 reserved; } #include "vmware_pack_end.h" HgfsRequestFsyncV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyFsyncV4 { uint64 reserved; } #include "vmware_pack_end.h" HgfsReplyFsyncV4; /* * This request is name based only. * Server fails this request if HGFS_FILE_E_USE_FILE_DESC is set in the fileName. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestAccessCheckV4 { HgfsFileNameV3 fileName; /* File concerned. */ HgfsPermissions perms; /* Permissions to check for. */ uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsRequestAccessCheckV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyAccessCheckV4 { uint64 reserved; /* Reserved for future use. 
*/ } #include "vmware_pack_end.h" HgfsReplyAccessCheckV4; /* * Additional HgfsPersmissions type: checks file existense without * requesting any particular access. * Matches F_OK mode parameter for POSIX access (2) API. */ #define HGFS_PERM_EXISTS 8 /* * HGFS_PLATFORM_ALL is a HGFS specific platform independent FSCTL * that correspond to different OS specific codes. * Other types of FSCTL are platform specific to allow better user * experience when guest and host OS are the same. HGFS does not interpret * platform specific FSCTL in any way, it just passes it through to the * host. If the host run appropriate OS it executes FSCTL on user's behalf, * otherwise it fails the request. */ typedef enum HgfsPlatformType { HGFS_PLATFORM_ALL, HGFS_PLATFORM_WINDOWS, HGFS_PLATFORM_LINUX, HGFS_PLATFORM_MAC }HgfsPlatformType; #define HGFS_FSCTL_SET_SPARSE 1 /* Platform independent FSCTL to make file sparse. */ /* Platform together with the code define exact meaning of the operation. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestFsctlV4 { HgfsHandle fid; uint32 code; HgfsPlatformType platform; uint32 dataLength; char data[1]; } #include "vmware_pack_end.h" HgfsRequestFsctlV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyFsctlV4 { uint32 dataLength; char data[1]; } #include "vmware_pack_end.h" HgfsReplyFsctlV4; /* * Creating a new file or reading file attributes involves ACL. There is a good * definition of multi-platform ACLs in RFC 3530, section 5.11. HGFS should use * ACLs defined in this document (http://tools.ietf.org/html/rfc3530#section-5.11). * ACL support is not mandatory. If a request to create file with ACL comes to a host * that does not support ACL, the request should succeed and setting ACL is ignored. * Such behavior is consistent with other file systems. */ typedef uint64 HgfsOpenCreateOptions; /* O_SYMLINK in Mac OS or FILE_FLAG_OPEN_REPARSE_POINT in Windows. 
*/ #define HGFS_OPENCREATE_OPTION_SYMLINK (1 << 0) /* O_SHLOCK in Mac OS or obtain shared range lock for the whole file. */ #define HGFS_OPENCREATE_OPTION_SHLOCK (1 << 1) /* O_EXLOCK in Mac OS or obtain exclusive range lock for the whole file. */ #define HGFS_OPENCREATE_OPTION_EXLOCK (1 << 2) /* O_SYNC in Linux, ignored in Mac, FILE_FLAG_WRITE_THROUGH in Windows. */ #define HGFS_OPENCREATE_OPTION_WRITETHROUGH (1 << 3) /* FILE_FLAG_NO_BUFFERING in Windows, O_SYNC in Linux, ignored on Mac OS. */ #define HGFS_OPENCREATE_OPTION_NO_BUFERING (1 << 4) /* * O_NOFOLLOW in POSIX. Windows server checks for reparse point * and fails the request if file has one. */ #define HGFS_OPENCREATE_OPTION_NO_FOLLOW (1 << 5) /* FILE_FLAG_NO_RECALL in Windows. Ignored by POSIX host. */ #define HGFS_OPENCREATE_OPTION_NO_RECALL (1 << 6) /* FILE_FLAG_RANDOM_ACCESS in Windows. Ignored by POSIX host. */ #define HGFS_OPENCREATE_OPTION_RANDOM (1 << 7) /* FILE_FLAG_SEQUENTIAL_SCAN in Windows. Ignored by POSIX host. */ #define HGFS_OPENCREATE_OPTION_SEQUENTIAL (1 << 8) /* FILE_FLAG_BACKUP_SEMANTICS in Windows. Ignored by POSIX host. */ #define HGFS_OPENCREATE_OPTION_BACKUP_SEMANTICS (1 << 9) /* Fail opening if the file already exists and it is not a directory. */ #define HGFS_OPENCREATE_OPTION_DIRECTORY (1 << 10) /* Fail opening if the file already exists and it is a directory. */ #define HGFS_OPENCREATE_OPTION_NON_DIRECTORY (1 << 11) typedef #include "vmware_pack_begin.h" struct HgfsRequestOpenV4 { HgfsOpenValid mask; /* Bitmask that specified which fields are valid. */ HgfsOpenMode mode; /* Which type of access requested. 
See desiredAccess */ HgfsOpenFlags flags; /* Which flags to open the file with */ HgfsPermissions specialPerms; /* Desired 'special' permissions for file creation */ HgfsPermissions ownerPerms; /* Desired 'owner' permissions for file creation */ HgfsPermissions groupPerms; /* Desired 'group' permissions for file creation */ HgfsPermissions otherPerms; /* Desired 'other' permissions for file creation */ HgfsAttrFlags attr; /* Attributes, if any, for file creation */ uint64 allocationSize; /* How much space to pre-allocate during creation */ uint32 desiredAccess; /* Extended support for windows access modes */ uint32 shareAccess; /* Windows only, share access modes */ HgfsOpenCreateOptions createOptions; /* Various options. */ HgfsLockType requestedLock; /* The type of lock desired by the client */ HgfsFileNameV3 fileName; /* fid can be used only for relative open, * i.e. to open named stream. */ HgfsFileName streamName; /* Name of the alternative named stream. * All flags are the same as defined in fileName. * The name is used in conjuction with fileName * field, for example if Windows opens file * "abc.txt:stream" then fileName contains * "abc.txt" and streamName contains "stream" */ /* * EA to set if the file is created or overwritten. The parameter should be ignored * if the file already exists. * It is needed to correctly implement Windows semantics for opening files. * It should work atomically - failure to add EA should result in failure to create * the new file. * If the host file system does not support EA server should fail the request rather * then succeeding and silently dropping EA. */ HgfsRequestSetEAV4 extendedAttributes; uint32 aclLength; /* Length of the acl field. */ char acl[1]; /* Multi-platform ACL as defined in RFC 3530. 
*/ } #include "vmware_pack_end.h" HgfsRequestOpenV4; typedef enum HgfsOpenResult { HGFS_FILE_OPENED, HGFS_FILE_CREATED, HGFS_FILE_OVERWRITTEN, HGFS_FILE_SUPERSIDED, } HgfsOpenResult; /* * Win32 API has a special value for the desired access - MAXIMUM_ALLOWED. * Such desired access means that file system must grant as much rights for the file * as it is allowed for the current user. * HGFS client must know what access rights were granted to properly communicate this * information to the IoManager; grantedAccess field is used for this purpose. */ typedef #include "vmware_pack_begin.h" struct HgfsReplyOpenV4 { HgfsHandle file; /* Opaque file ID used by the server */ HgfsLockType grantedLock; /* The type of lock acquired by the server */ HgfsOpenResult openResult; /* Opened/overwritten or a new file created? */ uint32 grantedAccess; /* Granted access rights. */ uint64 fileId; /* Persistent volume-wide unique file id. */ uint64 volumeId; /* Persistent unique volume id. */ } #include "vmware_pack_end.h" HgfsReplyOpenV4; /* * Flags that define behaviour of the move/creating hard link operation. */ typedef uint64 HgfsMoveLinkFlags; #define HGFS_LINKMOVE_FLAG_REPLACE_EXISTING (1 << 0) /* Delete existing target. */ #define HGFS_LINKMOVE_FLAG_HARD_LINK (1 << 1) /* Create hard link. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestLinkMoveV4 { HgfsFileNameV3 oldFileName; /* Path to the exisitng source file.*/ HgfsFileNameV3 newFileName; /* Path to the destinatio name.*/ HgfsMoveLinkFlags flags; /* Flags that define behaviour of the operation.*/ } #include "vmware_pack_end.h" HgfsRequestLinkMoveV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyLinkMove4 { uint64 reserved; /* Reserved for future use. */ } #include "vmware_pack_end.h" HgfsReplyLinkMove4; /* * HgfsQueryVolumeMaskV4 mask in a request defines which volume properties client needs; * mask in a reply defines which properties were actually returned by the host. 
* * HGFS_QUERY_VOLUME_MASK_SIZE controls totalBytes, freeBytes and availableBytes. * HGFS_QUERY_VOLUME_MASK_FS_CAPABILITIES controls capabilities. * HGFS_QUERY_VOLUME_MASK_ATTRIBUTES controls creationTime. * HGFS_QUERY_VOLUME_MASK_VOLUME_GEOMETRY controls bytesPerSector and sectorPerCluster. * HGFS_QUERY_VOLUME_MASK_VOLUME_LABEL controls volume label. * HGFS_QUERY_VOLUME_MASK_FS_NAME controls fileSystemName. */ typedef uint64 HgfsQueryVolumeMaskV4; #define HGFS_QUERY_VOLUME_MASK_SIZE (1 << 0) #define HGFS_QUERY_VOLUME_MASK_ATTRIBUTES (1 << 1) #define HGFS_QUERY_VOLUME_MASK_FS_CAPABILITIES (1 << 2) #define HGFS_QUERY_VOLUME_MASK_VOLUME_LABEL (1 << 3) #define HGFS_QUERY_VOLUME_MASK_VOLUME_GEOMETRY (1 << 4) #define HGFS_QUERY_VOLUME_MASK_FS_NAME (1 << 5) typedef uint64 HgfsFileSystemCapabilities; #define HGFS_VOLUME_CASE_SENSITIVE (1 << 0) #define HGFS_VOLUME_SUPPORTS_EA (1 << 1) #define HGFS_VOLUME_SUPPORTS_COMPRESSION (1 << 2) #define HGFS_VOLUME_SUPPORTS_SHORT_NAMES (1 << 3) #define HGFS_VOLUME_SUPPORTS_ACL (1 << 4) #define HGFS_VOLUME_READ_ONLY (1 << 5) #define HGFS_VOLUME_SUPPORTS_ENCRYPTION (1 << 6) #define HGFS_VOLUME_SUPPORTS_OBJECT_ID (1 << 7) #define HGFS_VOLUME_SUPPORTS_REMOTE_STORAGE (1 << 8) #define HGFS_VOLUME_SUPPORTS_SYMLINKS (1 << 9) #define HGFS_VOLUME_SUPPORTS_SPARSE_FILES (1 << 10) #define HGFS_VOLUME_SUPPORTS_UNICODE (1 << 11) #define HGFS_VOLUME_SUPPORTS_QUOTA (1 << 12) #define HGFS_VOLUME_SUPPORTS_NAMED_STREAMS (1 << 13) typedef #include "vmware_pack_begin.h" struct HgfsRequestQueryVolumeV4 { HgfsQueryVolumeMaskV4 mask; HgfsFileNameV3 name; } #include "vmware_pack_end.h" HgfsRequestQueryVolumeV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyQueryVolumeV4 { HgfsQueryVolumeMaskV4 mask; /* Identifies which values were set by the host. */ uint64 totalBytes; /* Total volume capacity. */ uint64 freeBytes; /* Free space on the volume. */ uint64 availableBytes; /* Free space available for the user. 
*/ HgfsFileSystemCapabilities capabilities; /* File system capabilities. */ uint64 creationTime; /* Volume creation time. */ uint32 bytesPerSector; /* Sector size for the volume. */ uint32 sectorsPerCluster; /* Cluster size for the volume. */ HgfsFileName volumeLabel; /* Volume name or label. */ HgfsFileName fileSystemName;/* File system name. */ } #include "vmware_pack_end.h" HgfsReplyQueryVolumeV4; typedef uint32 HgfsSearchReadMask; #define HGFS_SEARCH_READ_NAME (1 << 0) #define HGFS_SEARCH_READ_SHORT_NAME (1 << 1) #define HGFS_SEARCH_READ_FILE_SIZE (1 << 2) #define HGFS_SEARCH_READ_ALLOCATION_SIZE (1 << 3) #define HGFS_SEARCH_READ_EA_SIZE (1 << 4) #define HGFS_SEARCH_READ_TIME_STAMP (1 << 5) #define HGFS_SEARCH_READ_FILE_ATTRIBUTES (1 << 6) #define HGFS_SEARCH_READ_FILE_NODE_TYPE (1 << 7) #define HGFS_SEARCH_READ_REPARSE_TAG (1 << 8) #define HGFS_SEARCH_READ_FILE_ID (1 << 9) typedef uint32 HgfsSearchReadFlags; #define HGFS_SEARCH_READ_INITIAL_QUERY (1 << 1) #define HGFS_SEARCH_READ_SINGLE_ENTRY (1 << 2) #define HGFS_SEARCH_READ_FID_OPEN_V4 (1 << 3) #define HGFS_SEARCH_READ_REPLY_FINAL_ENTRY (1 << 4) /* * Read directory request can be used to enumerate files in a directory. * File handle used in the request can be either from HgfsRequestOpenV4 or * HgfsRequestSearchOpenV3. * searchPattern parameter allows filter out file names in the server for optimization. * It is optional - host may ignore patterns and return entries that do not match * the pattern. It is client responsibility to filter out names that do not match * the pattern. * * The mask field in request allows client to specify which properties it is * interested in. It allows to implement optimization in the server by skipping * parameters which client does not need. * * The HGFS Server fills mask field in the reply buffer to specify which * of the requested properties it supports, which may be a subset of the * requested properties. 
*/ typedef #include "vmware_pack_begin.h" struct HgfsRequestSearchReadV4 { HgfsSearchReadMask mask; HgfsSearchReadFlags flags; HgfsHandle fid; uint32 replyDirEntryMaxSize; uint32 restartIndex; uint64 reserved; HgfsFileName searchPattern; } #include "vmware_pack_end.h" HgfsRequestSearchReadV4; typedef #include "vmware_pack_begin.h" struct HgfsDirEntryV4 { uint32 nextEntryOffset; uint32 fileIndex; HgfsSearchReadMask mask; /* Returned mask: may be a subset of requested mask. */ HgfsAttrFlags attrFlags; /* File system attributes of the entry */ HgfsFileType fileType; uint64 fileSize; uint64 allocationSize; uint64 creationTime; uint64 accessTime; uint64 writeTime; uint64 attrChangeTime; uint64 hostFileId; /* File Id of the file on host: inode_t on Linux */ uint32 eaSize; /* Byte size of any extended attributes. */ uint32 reparseTag; /* Windows only: reparse point tag. */ uint64 reserved; /* Reserved for future use. */ HgfsShortFileName shortName; /* Windows only: 8 dot 3 format name. */ HgfsFileName fileName; /* Entry file name. */ } #include "vmware_pack_end.h" HgfsDirEntryV4; typedef #include "vmware_pack_begin.h" struct HgfsReplySearchReadV4 { uint32 numberEntriesReturned; /* number of directory entries in this reply. */ uint32 offsetToContinue; /* Entry index of the directory entry. */ HgfsSearchReadFlags flags; /* Flags to indicate reply specifics */ uint64 reserved; /* Reserved for future use. */ HgfsDirEntryV4 entries[1]; /* Unused as entries transfered using shared memory. */ } #include "vmware_pack_end.h" HgfsReplySearchReadV4; /* * File handle returned by HgfsRequestOpenV4 or later. Descriptors returned by * HgfsHandle fid; earlier versions of HgfsRequestOpen are not supported. 
*/ typedef #include "vmware_pack_begin.h" struct HgfsRequestEnumerateStreamsV4 { uint32 restartIndex; } #include "vmware_pack_end.h" HgfsRequestEnumerateStreamsV4; typedef #include "vmware_pack_begin.h" struct HgfsRequestStreamEntryV4 { uint32 nextEntryOffset; uint32 fileIndex; HgfsFileName fileName; } #include "vmware_pack_end.h" HgfsRequestStreamEntryV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyEnumerateStreamsV4 { uint32 numberEntriesReturned; uint32 offsetToContinue; uint64 reserved; HgfsRequestStreamEntryV4 entries[1]; } #include "vmware_pack_end.h" HgfsReplyEnumerateStreamsV4; typedef #include "vmware_pack_begin.h" struct HgfsRequestGetattrV4 { uint32 mask; uint32 flags; uint64 reserved; HgfsFileNameV3 name; } #include "vmware_pack_end.h" HgfsRequestGetattrV4; /* * V4 reports different file size for symlinks then V3 or V2. * It does not return file name length as EOF - it reports actual EOF. * On POSIX the value is always 0 and on Windows it is an actual EOF of * a file with a reparse point. * Each client must adjust the value for file size according to guest OS rules. * * Mask in HgfsAttr2V2 should be extended to include short name, symlink target and ACL. * If the host does not support a requested feature it is free to clear the * correspondent bit in the mask and ignore the feature. * * Multi-platform notice: symbolic link is represented by a file with REPARSE_POINT * on Windows. Thus Windows supports swtiching a file type between * regular or directory => symlink and back. * Setting symlinkTarget attribute on Windows host results in assigning * reparse point to the host file. 
*/ typedef #include "vmware_pack_begin.h" struct HgfsAttrV4 { HgfsAttrV2 attr; uint32 numberOfLinks; HgfsFileName shortName; HgfsFileName symlinkTarget; uint32 aclLength; uint64 reserved; char acl[1]; } #include "vmware_pack_end.h" HgfsAttrV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyGetattrV4 { HgfsAttrV4 attr; } #include "vmware_pack_end.h" HgfsReplyGetattrV4; typedef #include "vmware_pack_begin.h" struct HgfsRequestSetattrV4 { HgfsAttrHint hints; HgfsAttrV2 attr; uint64 reserved; /* Reserved for future use */ HgfsFileNameV3 fileName; /* Filename used when file handle invalid. */ } #include "vmware_pack_end.h" HgfsRequestSetattrV4; typedef #include "vmware_pack_begin.h" struct HgfsReplySetattrV4 { uint32 mask; /* Defines which attributes were set. */ } #include "vmware_pack_end.h" HgfsReplySetattrV4; /* * Unlike V3 deletion this command can be used to delete both files and directories. * Its semantics depends on whether fid or file path is specified in the fileName. * When path is used it implements/emulates POSIX semantics - name is deleted from * the directory however if the file is opened it is still accessible. When fid is used * the file name disappears from the folder only when the last handle for the file is * closed - Windows style deletion. */ typedef #include "vmware_pack_begin.h" struct HgfsRequestDeleteFileV4 { HgfsFileNameV3 fileName; } #include "vmware_pack_end.h" HgfsRequestDeleteFileV4; typedef #include "vmware_pack_begin.h" struct HgfsReplyDeleteFileV4 { uint64 reserved; } #include "vmware_pack_end.h" HgfsReplyDeleteFileV4; #endif /* _HGFS_PROTO_H_ */ vmhgfs-only/module.h 0000444 0000000 0000000 00000020404 13432725306 013466 0 ustar root root /********************************************************* * Copyright (C) 2006-2016 VMware, Inc. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * module.h -- * * Global module definitions for the entire vmhgfs driver. */ #ifndef _HGFS_DRIVER_MODULE_H_ #define _HGFS_DRIVER_MODULE_H_ /* Must come before any kernel header file. */ #include "driver-config.h" #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) #include <linux/backing-dev.h> #endif #include <asm/atomic.h> #include "compat_fs.h" #include "compat_semaphore.h" #include "compat_slab.h" #include "compat_spinlock.h" #include "compat_version.h" #include "rpcout.h" #include "hgfsProto.h" #ifndef __user #define __user #endif /* Logging stuff. */ #define LGPFX "VMware hgfs: " #ifdef VMX86_DEVEL extern int LOGLEVEL_THRESHOLD; #define LOG(level, args) ((void) (LOGLEVEL_THRESHOLD >= (level) ? (printk args) : 0)) #else #define LOG(level, args) #endif /* Blocksize to be set in superblock. (XXX how is this used?) */ #define HGFS_BLOCKSIZE 1024 /* The amount of time we'll wait for the backdoor to process our request. */ #define HGFS_REQUEST_TIMEOUT (30 * HZ) /* * Inode number of the root inode. We set this to be non-zero because, * according to glibc source, when the returned inode number in a dirent * is zero, that entry has been deleted. This is presumably when you've done * an opendir, the file is deleted, and then you do a readdir. 
The point is * that if the root inode is zero, aliases to it (such as '.' and "..") won't * appear in a directory listing. */ #define HGFS_ROOT_INO 1 /* Leave HGFS_ROOT_INO and below out of inode number generation. */ #define HGFS_RESERVED_INO HGFS_ROOT_INO + 1 /* * Macros for accessing members that are private to this code in * sb/inode/file structs. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 5, 0) typedef uid_t kuid_t; typedef gid_t kgid_t; #define from_kuid(_ns, _kuid) (_kuid) #define from_kgid(_ns, _kgid) (_kgid) #define make_kuid(_ns, _uid) (_uid) #define make_kgid(_ns, _gid) (_gid) #endif /* * Since the f_dentry disappeared we do this locally. * It is used quite extensively and only one other driver * is affected by this so it is done locally and not * as part of the common compat_fs.h includes. */ #if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0) #ifndef f_dentry #define f_dentry f_path.dentry #endif #endif #define HGFS_SET_SB_TO_COMMON(sb, common) do { (sb)->s_fs_info = (common); } while (0) #define HGFS_SB_TO_COMMON(sb) ((HgfsSuperInfo *)(sb)->s_fs_info) #define INODE_GET_II_P(_inode) container_of(_inode, HgfsInodeInfo, inode) #if defined VMW_INODE_2618 #define INODE_SET_II_P(inode, info) do { (inode)->i_private = (info); } while (0) #else #define INODE_SET_II_P(inode, info) do { (inode)->u.generic_ip = (info); } while (0) #endif #define HGFS_DECLARE_TIME(unixtm) struct timespec unixtm #define HGFS_EQUAL_TIME(unixtm1, unixtm2) timespec_equal(&unixtm1, &unixtm2) #define HGFS_SET_TIME(unixtm,nttime) HgfsConvertFromNtTimeNsec(&unixtm, nttime) #define HGFS_GET_TIME(unixtm) HgfsConvertTimeSpecToNtTime(&unixtm) #define HGFS_GET_CURRENT_TIME() ({ \ struct timespec ct = CURRENT_TIME; \ HGFS_GET_TIME(ct); \ }) /* * Beware! This macro returns list of two elements. Do not add braces around. 
*/ #define HGFS_PRINT_TIME(unixtm) unixtm.tv_sec, unixtm.tv_nsec /* * For files opened in our actual Host/Guest filesystem, the * file->private_data field is used for storing the HgfsFileInfo of the * opened file. This macro is for accessing the file information from the * file *. */ #define FILE_SET_FI_P(file, info) do { (file)->private_data = info; } while (0) #define FILE_GET_FI_P(file) ((HgfsFileInfo *)(file)->private_data) #define HGFS_MNT_SET_UID (1 << 0) /* Was the UID specified at mount-time? */ #define HGFS_MNT_SET_GID (1 << 1) /* Was the GID specified at mount-time? */ #define HGFS_MNT_SERVER_INUM (1 << 2) /* Use inode numbers from the server? */ /* Data kept in each superblock in sb->u. */ typedef struct HgfsSuperInfo { #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 0, 0) struct backing_dev_info bdi; /* Kernel VFS uses it to check whether our backend need to writeback dirty pages among other things. */ #endif kuid_t uid; /* UID of user who mounted this fs. */ kgid_t gid; /* GID of user who mounted this fs. */ mode_t fmask; /* File permission mask. */ mode_t dmask; /* Directory permission mask. */ uint32 ttl; /* Maximum dentry age (in ticks). */ char *shareName; /* Mounted share name. */ size_t shareNameLen; /* To avoid repeated strlen() calls. */ uint32 mntFlags; /* HGFS mount flags */ } HgfsSuperInfo; /* * HGFS specific per-inode data. */ typedef struct HgfsInodeInfo { /* Embedded inode. */ struct inode inode; /* Inode number given by the host. */ uint64 hostFileId; /* Was the inode number for this inode generated via iunique()? */ Bool isFakeInodeNumber; /* Is this a fake inode created in HgfsCreate that has yet to be opened? */ Bool createdAndUnopened; /* * The number of write back pages to the file which is tracked so any * concurrent file validations such as reads will not invalidate the cache. */ unsigned long numWbPages; struct list_head listWbPages; /* List of open files for this inode. 
*/ struct list_head files; } HgfsInodeInfo; /* * HGFS specific per-file data. */ typedef struct HgfsFileInfo { /* Links to place this object on the inode's list of open files. */ struct list_head list; /* Handle to be sent to the server. Needed for writepage(). */ HgfsHandle handle; /* * Mode with which handle was opened. When we reuse a handle, we need to * choose one with appropriate permissions. */ HgfsOpenMode mode; /* * Do we need to reopen a directory ? Note that this is only used * for directories. */ Bool isStale; /* Directory read position for tracking. */ loff_t direntPos; } HgfsFileInfo; /* * Global synchronization primitives. */ /* * We use hgfsBigLock to protect certain global structures that are locked for * a very short amount of time. */ extern spinlock_t hgfsBigLock; /* Hgfs filesystem structs. */ extern struct super_operations HgfsSuperOperations; extern struct dentry_operations HgfsDentryOperations; extern struct inode_operations HgfsFileInodeOperations; extern struct inode_operations HgfsDirInodeOperations; extern struct inode_operations HgfsLinkInodeOperations; extern struct file_operations HgfsFileFileOperations; extern struct file_operations HgfsDirFileOperations; extern struct address_space_operations HgfsAddressSpaceOperations; /* Other global state. 
*/ extern compat_kmem_cache *hgfsInodeCache; extern HgfsOp hgfsVersionOpen; extern HgfsOp hgfsVersionRead; extern HgfsOp hgfsVersionWrite; extern HgfsOp hgfsVersionClose; extern HgfsOp hgfsVersionSearchOpen; extern HgfsOp hgfsVersionSearchRead; extern HgfsOp hgfsVersionSearchClose; extern HgfsOp hgfsVersionGetattr; extern HgfsOp hgfsVersionSetattr; extern HgfsOp hgfsVersionCreateDir; extern HgfsOp hgfsVersionDeleteFile; extern HgfsOp hgfsVersionDeleteDir; extern HgfsOp hgfsVersionRename; extern HgfsOp hgfsVersionQueryVolumeInfo; extern HgfsOp hgfsVersionCreateSymlink; #endif // _HGFS_DRIVER_MODULE_H_ vmhgfs-only/fsutil.c 0000444 0000000 0000000 00000225343 13432725306 013513 0 ustar root root /********************************************************* * Copyright (C) 2006-2019 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * fsutil.c -- * * Functions used in more than one type of filesystem operation will be * exported from this file. */ /* Must come before any kernel header file. */ #include "driver-config.h" #include <linux/signal.h> /* Must come before compat_dcache. 
*/ #include "compat_fs.h" #include "compat_dcache.h" #include "compat_kernel.h" #include "compat_mm.h" #include "compat_sched.h" #include "compat_slab.h" #include "compat_spinlock.h" #include "vm_assert.h" #include "cpName.h" #include "cpNameLite.h" #include "hgfsUtil.h" #include "module.h" #include "request.h" #include "fsutil.h" #include "hgfsProto.h" #include "vm_basic_types.h" /* * The get inode descriptor object. */ typedef struct HgfsInodeAttrDesc { uint32 flags; const HgfsAttrInfo *attr; } HgfsInodeAttrDesc; #define HGFS_INO_DESC_INO_FAKE (1 << 0) #define HGFS_INO_DESC_INO_COLLISION (1 << 1) static void HgfsSetFileType(struct inode *inode, HgfsAttrInfo const *attr); static int HgfsUnpackGetattrReply(HgfsReq *req, HgfsAttrInfo *attr, char **fileName); static int HgfsBuildRootPath(char *buffer, size_t bufferLen, HgfsSuperInfo *si); static int HgfsBuildFullPath(char *buffer, size_t bufferLen, HgfsSuperInfo *si, struct dentry *dentry); static struct inode *HgfsGetInode(struct super_block *sb, ino_t ino, HgfsInodeAttrDesc *iattrDesc); static void HgfsDoReadInode(struct inode *inode); static int HgfsInitInode(struct inode *inode, void *opaque); static int HgfsFindInode(struct inode *inode, void *opaque); /* * For kernels that are older than 2.6.32 there is no truncate_pagecache call * so we set an empty macro. * If we have the call then we check for kernel 3.12 compatibility. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32) #define HGFS_TRUNCATE_PAGE_CACHE(inode, oldSize, newSize) #elif defined VMW_PAGECACHE_312 || LINUX_VERSION_CODE >= KERNEL_VERSION(3, 12, 0) #define HGFS_TRUNCATE_PAGE_CACHE(inode, oldSize, newSize) truncate_pagecache(inode, newSize) #else #define HGFS_TRUNCATE_PAGE_CACHE(inode, oldSize, newSize) truncate_pagecache(inode, oldSize, newSize) #endif /* * Private function implementations. 
*/ #if LINUX_VERSION_CODE < KERNEL_VERSION(3, 2, 0) && !defined(HAVE_SET_NLINK) /* *---------------------------------------------------------------------------- * * set_nlink -- * * Set an inode's link count. * * Results: * None * * Side effects: * None * *---------------------------------------------------------------------------- */ static inline void set_nlink(struct inode *inode, unsigned int nlink) { inode->i_nlink = nlink; } #endif /* *---------------------------------------------------------------------- * * HgfsGetFileType -- * * Get file type from the inode mode. * * Results: * The file type. * * Side effects: * None * *---------------------------------------------------------------------- */ static HgfsFileType HgfsGetFileType(struct inode *inode) // IN: Attrs to use { HgfsFileType type; ASSERT(inode != NULL); switch (inode->i_mode & S_IFMT) { case S_IFLNK: type = HGFS_FILE_TYPE_SYMLINK; break; case S_IFREG: type = HGFS_FILE_TYPE_REGULAR; break; case S_IFDIR: type = HGFS_FILE_TYPE_DIRECTORY; break; default: /* * XXX Should never happen. Since there aren't any other HGFS supported type. */ LOG(4, (KERN_DEBUG LGPFX "%s: UNSUPPORTED inode type %d\n", __func__, inode->i_mode & S_IFMT)); type = 0; break; } LOG(10, (KERN_DEBUG LGPFX "%s: return %d\n", __func__, type)); return type; } /* *---------------------------------------------------------------------- * * HgfsSetFileType -- * * Set file type in inode according to the hgfs attributes. 
* * Results: * None * * Side effects: * None * *---------------------------------------------------------------------- */ static void HgfsSetFileType(struct inode *inode, // IN/OUT: Inode to update HgfsAttrInfo const *attr) // IN: Attrs to use to update { ASSERT(inode); ASSERT(attr); switch (attr->type) { case HGFS_FILE_TYPE_DIRECTORY: inode->i_mode = S_IFDIR; inode->i_op = &HgfsDirInodeOperations; inode->i_fop = &HgfsDirFileOperations; break; case HGFS_FILE_TYPE_SYMLINK: inode->i_mode = S_IFLNK; inode->i_op = &HgfsLinkInodeOperations; break; case HGFS_FILE_TYPE_REGULAR: inode->i_mode = S_IFREG; inode->i_op = &HgfsFileInodeOperations; inode->i_fop = &HgfsFileFileOperations; inode->i_data.a_ops = &HgfsAddressSpaceOperations; break; default: /* * XXX Should never happen. I'd put NOT_IMPLEMENTED() here * but if the driver ever goes in the host it's probably not * a good idea for an attacker to be able to hang the host * simply by using a bogus file type in a reply. [bac] */ LOG(4, (KERN_DEBUG "VMware hgfs: HgfsSetFileType: UNSUPPORTED " "inode type\n")); inode->i_mode = 0; // NOT_IMPLEMENTED(); break; } } /* *---------------------------------------------------------------------- * * HgfsUnpackGetattrReply -- * * This function abstracts the differences between a GetattrV1 and * a GetattrV2. The caller provides the packet containing the reply * and we populate the AttrInfo with version-independent information. * * Note that attr->requestType has already been populated so that we * know whether to expect a V1 or V2 reply. * * Results: * 0 on success, anything else on failure. 
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsUnpackGetattrReply(HgfsReq *req,        // IN: Reply packet
                       HgfsAttrInfo *attr,  // IN/OUT: Attributes
                       char **fileName)     // OUT: file name
{
   int result;
   char *name = NULL;
   uint32 length = 0;

   ASSERT(req);
   ASSERT(attr);

   /* Common attributes are decoded the same way for every version. */
   result = HgfsUnpackCommonAttr(req, attr);
   if (result != 0) {
      return result;
   }

   /* GetattrV2+ also wants a symlink target if it exists. */
   if (attr->requestType == HGFS_OP_GETATTR_V3) {
      HgfsReplyGetattrV3 *replyV3 = (HgfsReplyGetattrV3 *)(HGFS_REP_PAYLOAD_V3(req));
      name = replyV3->symlinkTarget.name;
      length = replyV3->symlinkTarget.length;

      /*
       * Skip the symlinkTarget if it's too long. The length is validated
       * against the remaining space in the reply buffer so a malicious or
       * corrupted reply cannot make us read past the packet.
       */
      if (length > HGFS_NAME_BUFFER_SIZET(req->bufferSize, sizeof *replyV3 + sizeof(HgfsReply))) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackGetattrReply: symlink "
                 "target name too long, ignoring\n"));
         return -ENAMETOOLONG;
      }
   } else if (attr->requestType == HGFS_OP_GETATTR_V2) {
      HgfsReplyGetattrV2 *replyV2 = (HgfsReplyGetattrV2 *)
         (HGFS_REQ_PAYLOAD(req));
      name = replyV2->symlinkTarget.name;
      length = replyV2->symlinkTarget.length;

      /* Skip the symlinkTarget if it's too long. */
      if (length > HGFS_NAME_BUFFER_SIZE(req->bufferSize, replyV2)) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackGetattrReply: symlink "
                 "target name too long, ignoring\n"));
         return -ENAMETOOLONG;
      }
   }

   /*
    * The caller may not care about the symlink target (fileName == NULL);
    * when it does, the target is returned as a freshly kmalloc'ed,
    * NUL-terminated string the caller must kfree.
    */
   if (fileName) {
      if (length != 0) {
         *fileName = kmalloc(length + 1, GFP_KERNEL);
         if (*fileName == NULL) {
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackGetattrReply: out of "
                    "memory allocating symlink target name, ignoring\n"));
            return -ENOMEM;
         }

         /* Copy and convert. From now on, the symlink target is in UTF8. */
         memcpy(*fileName, name, length);
         CPNameLite_ConvertFrom(*fileName, length, '/');
         (*fileName)[length] = '\0';
         LOG(4, (KERN_DEBUG "VMware hgfs: %s: symlink name %s\n", __func__,
                 *fileName));
      } else {
         *fileName = NULL;
      }
   }

   return 0;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPackCommonattr --
 *
 *    This function abstracts the HgfsAttr struct behind HgfsAttrInfo.
 *    Callers can pass one of four replies into it and receive back the
 *    attributes for those replies.
 *
 *    Callers must populate attr->requestType so that we know whether to
 *    expect a V1 or V2 Attr struct.
 *
 * Results:
 *    Zero on success, non-zero otherwise.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsPackCommonattr(HgfsReq *req,            // IN/OUT: request buffer
                   HgfsOp opUsed,           // IN: Op to be used
                   HgfsHandle handle,       // IN: file handle to use if valid
                   size_t *reqSize,         // OUT: request size
                   size_t *reqBufferSize,   // OUT: request buffer size
                   char **fileName,         // OUT: pointer to request file name
                   uint32 **fileNameLength, // OUT: pointer to request file name length
                   HgfsAttrInfo *attr)      // OUT: Attrs to update
{
   int result = 0;

   /* Remember which protocol version we packed, for reply decoding. */
   attr->requestType = opUsed;

   switch (opUsed) {
   case HGFS_OP_GETATTR_V3: {
      HgfsRequest *requestHeader;
      HgfsRequestGetattrV3 *requestV3;

      /* Fill out the request packet. */
      requestHeader = (HgfsRequest *)(HGFS_REQ_PAYLOAD(req));
      requestHeader->op = opUsed;
      requestHeader->id = req->id;

      requestV3 = (HgfsRequestGetattrV3 *)HGFS_REQ_PAYLOAD_V3(req);

      /*
       * When possible, issue a getattr using an existing handle. This will
       * give us slightly better performance on a Windows server, and is more
       * correct regardless. If we don't find a handle, fall back on getattr
       * by name.
       */
      requestV3->hints = 0;
      if (handle != HGFS_INVALID_HANDLE) {
         requestV3->fileName.flags = HGFS_FILE_NAME_USE_FILE_DESC;
         requestV3->fileName.fid = handle;
         requestV3->fileName.length = 0;
         requestV3->fileName.caseType = HGFS_FILE_NAME_DEFAULT_CASE;
         /* NULL name tells the caller no pathname needs to be packed. */
         *fileName = NULL;
         *fileNameLength = NULL;
      } else {
         *fileName = requestV3->fileName.name;
         *fileNameLength = &requestV3->fileName.length;
         requestV3->fileName.flags = 0;
         requestV3->fileName.fid = HGFS_INVALID_HANDLE;
         requestV3->fileName.caseType = HGFS_FILE_NAME_CASE_SENSITIVE;
      }
      requestV3->reserved = 0;
      *reqSize = HGFS_REQ_PAYLOAD_SIZE_V3(requestV3);
      *reqBufferSize = HGFS_NAME_BUFFER_SIZET(req->bufferSize, *reqSize);
      break;
   }

   case HGFS_OP_GETATTR_V2: {
      HgfsRequestGetattrV2 *requestV2;

      requestV2 = (HgfsRequestGetattrV2 *)(HGFS_REQ_PAYLOAD(req));
      requestV2->header.op = opUsed;
      requestV2->header.id = req->id;

      /*
       * When possible, issue a getattr using an existing handle. This will
       * give us slightly better performance on a Windows server, and is more
       * correct regardless. If we don't find a handle, fall back on getattr
       * by name.
       */
      if (handle != HGFS_INVALID_HANDLE) {
         requestV2->hints = HGFS_ATTR_HINT_USE_FILE_DESC;
         requestV2->file = handle;
         *fileName = NULL;
         *fileNameLength = NULL;
      } else {
         requestV2->hints = 0;
         *fileName = requestV2->fileName.name;
         *fileNameLength = &requestV2->fileName.length;
      }
      *reqSize = sizeof *requestV2;
      *reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV2);
      break;
   }

   case HGFS_OP_GETATTR: {
      HgfsRequestGetattr *requestV1;

      requestV1 = (HgfsRequestGetattr *)(HGFS_REQ_PAYLOAD(req));
      requestV1->header.op = opUsed;
      requestV1->header.id = req->id;

      /* V1 has no handle support; always getattr by name. */
      *fileName = requestV1->fileName.name;
      *fileNameLength = &requestV1->fileName.length;
      *reqSize = sizeof *requestV1;
      *reqBufferSize = HGFS_NAME_BUFFER_SIZE(req->bufferSize, requestV1);
      break;
   }

   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: unexpected OP type encountered\n",
              __func__));
      result = -EPROTO;
      break;
   }

   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPackGetattrRequestInt --
 *
 *    Setup the getattr request, depending on the op version. When possible,
 *    we will issue the getattr using an existing open HGFS handle.
 *
 * Results:
 *    Returns zero on success, or negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static int
HgfsPackGetattrRequestInt(HgfsReq *req,          // IN/OUT: Request buffer
                          HgfsOp opUsed,         // IN: Op to be used
                          Bool allowHandleReuse, // IN: Can we use a handle?
                          HgfsSuperInfo *si,     // IN: super block info
                          struct dentry *dentry, // IN: Optional dentry containing name/handle
                          HgfsAttrInfo *attr)    // OUT: Attrs to update
{
   size_t reqBufferSize;
   size_t reqSize;
   char *fileName = NULL;
   uint32 *fileNameLength = NULL;
   HgfsHandle handle = HGFS_INVALID_HANDLE;
   int result = 0;

   ASSERT(attr != NULL);
   ASSERT(req != NULL);
   ASSERT(si != NULL);

   if (allowHandleReuse) {
      /* The dentry must be valid if the caller wants to use a file handle. */
      ASSERT(dentry != NULL);
      /* Errors are dropped getting the file handle, as we will use the name instead. */
      (void)HgfsGetHandle(dentry->d_inode, 0, &handle);
   }

   result = HgfsPackCommonattr(req, opUsed, handle, &reqSize, &reqBufferSize,
                               &fileName, &fileNameLength, attr);
   if (0 > result) {
      goto out;
   }

   /* Avoid all this extra work when we're doing a getattr by handle. */
   if (fileName != NULL) {
      /* Build full name to send to server. */
      if (HgfsBuildFullPath(fileName, reqBufferSize, si, dentry) < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: %s: build path failed\n", __func__));
         result = -EINVAL;
         goto out;
      }
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: getting attrs for \"%s\"\n",
              __func__, fileName));

      /* Convert to CP name. */
      result = CPName_ConvertTo(fileName, reqBufferSize, fileName);
      if (result < 0) {
         LOG(4, (KERN_DEBUG "VMware hgfs: %s: CP conversion failed\n", __func__));
         result = -EINVAL;
         goto out;
      }

      /* On success CPName_ConvertTo returns the converted name length. */
      *fileNameLength = result;
   }

   /*
    * Total payload is the fixed request size plus the converted name length
    * (result is 0 on the getattr-by-handle path).
    */
   req->payloadSize = reqSize + result;
   result = 0;

out:
   return result;
}


/*
 * Public function implementations.
 */

/*
 *----------------------------------------------------------------------
 *
 * HgfsUnpackCommonAttr --
 *
 *    This function abstracts the HgfsAttr struct behind HgfsAttrInfo.
 *    Callers can pass one of four replies into it and receive back the
 *    attributes for those replies.
 *
 *    Callers must populate attr->requestType so that we know whether to
 *    expect a V1 or V2 Attr struct.
 *
 * Results:
 *    Zero on success, non-zero otherwise.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

int
HgfsUnpackCommonAttr(HgfsReq *req,            // IN: Reply packet
                     HgfsAttrInfo *attrInfo)  // OUT: Attributes
{
   HgfsReplyGetattrV3 *getattrReplyV3;
   HgfsReplyGetattrV2 *getattrReplyV2;
   HgfsReplyGetattr *getattrReplyV1;
   HgfsReplySearchReadV3 *searchReadReplyV3;
   HgfsReplySearchReadV2 *searchReadReplyV2;
   HgfsReplySearchRead *searchReadReplyV1;
   HgfsDirEntry *dirent;
   HgfsAttrV2 *attrV2 = NULL;
   HgfsAttr *attrV1 = NULL;

   ASSERT(req);
   ASSERT(attrInfo);

   /*
    * Locate the attribute struct inside the reply. V2+ ops carry an
    * HgfsAttrV2 with a validity mask; V1 ops carry a plain HgfsAttr where
    * every field is implicitly valid.
    */
   switch (attrInfo->requestType) {
   case HGFS_OP_GETATTR_V3:
      getattrReplyV3 = (HgfsReplyGetattrV3 *)(HGFS_REP_PAYLOAD_V3(req));
      attrV2 = &getattrReplyV3->attr;
      break;
   case HGFS_OP_GETATTR_V2:
      getattrReplyV2 = (HgfsReplyGetattrV2 *)(HGFS_REQ_PAYLOAD(req));
      attrV2 = &getattrReplyV2->attr;
      break;
   case HGFS_OP_GETATTR:
      getattrReplyV1 = (HgfsReplyGetattr *)(HGFS_REQ_PAYLOAD(req));
      attrV1 = &getattrReplyV1->attr;
      break;
   case HGFS_OP_SEARCH_READ_V3:
      searchReadReplyV3 = (HgfsReplySearchReadV3 *)(HGFS_REP_PAYLOAD_V3(req));
      dirent = (HgfsDirEntry *)searchReadReplyV3->payload;
      attrV2 = &dirent->attr;
      break;
   case HGFS_OP_SEARCH_READ_V2:
      searchReadReplyV2 = (HgfsReplySearchReadV2 *)(HGFS_REQ_PAYLOAD(req));
      attrV2 = &searchReadReplyV2->attr;
      break;
   case HGFS_OP_SEARCH_READ:
      searchReadReplyV1 = (HgfsReplySearchRead *)(HGFS_REQ_PAYLOAD(req));
      attrV1 = &searchReadReplyV1->attr;
      break;
   default:
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsUnpackCommonAttr: unexpected op "
              "in reply packet\n"));
      return -EPROTO;
   }

   if (attrV2 != NULL) {
      /* Copy only the fields the server marked valid; mirror its mask. */
      attrInfo->mask = 0;

      if (attrV2->mask & HGFS_ATTR_VALID_TYPE) {
         attrInfo->type = attrV2->type;
         attrInfo->mask |= HGFS_ATTR_VALID_TYPE;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_SIZE) {
         attrInfo->size = attrV2->size;
         attrInfo->mask |= HGFS_ATTR_VALID_SIZE;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_ALLOCATION_SIZE) {
         attrInfo->allocSize = attrV2->allocationSize;
         attrInfo->mask |= HGFS_ATTR_VALID_ALLOCATION_SIZE;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_ACCESS_TIME) {
         attrInfo->accessTime = attrV2->accessTime;
         attrInfo->mask |= HGFS_ATTR_VALID_ACCESS_TIME;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_WRITE_TIME) {
         attrInfo->writeTime = attrV2->writeTime;
         attrInfo->mask |= HGFS_ATTR_VALID_WRITE_TIME;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_CHANGE_TIME) {
         attrInfo->attrChangeTime = attrV2->attrChangeTime;
         attrInfo->mask |= HGFS_ATTR_VALID_CHANGE_TIME;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_SPECIAL_PERMS) {
         attrInfo->specialPerms = attrV2->specialPerms;
         attrInfo->mask |= HGFS_ATTR_VALID_SPECIAL_PERMS;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_OWNER_PERMS) {
         attrInfo->ownerPerms = attrV2->ownerPerms;
         attrInfo->mask |= HGFS_ATTR_VALID_OWNER_PERMS;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_GROUP_PERMS) {
         attrInfo->groupPerms = attrV2->groupPerms;
         attrInfo->mask |= HGFS_ATTR_VALID_GROUP_PERMS;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_OTHER_PERMS) {
         attrInfo->otherPerms = attrV2->otherPerms;
         attrInfo->mask |= HGFS_ATTR_VALID_OTHER_PERMS;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_USERID) {
         attrInfo->userId = attrV2->userId;
         attrInfo->mask |= HGFS_ATTR_VALID_USERID;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_GROUPID) {
         attrInfo->groupId = attrV2->groupId;
         attrInfo->mask |= HGFS_ATTR_VALID_GROUPID;
      }
      /* Either a static or non-static server file ID maps to our file ID. */
      if (attrV2->mask & (HGFS_ATTR_VALID_FILEID |
                          HGFS_ATTR_VALID_NON_STATIC_FILEID)) {
         attrInfo->hostFileId = attrV2->hostFileId;
         attrInfo->mask |= HGFS_ATTR_VALID_FILEID;
      }
      if (attrV2->mask & HGFS_ATTR_VALID_EFFECTIVE_PERMS) {
         attrInfo->effectivePerms = attrV2->effectivePerms;
         attrInfo->mask |= HGFS_ATTR_VALID_EFFECTIVE_PERMS;
      }
   } else if (attrV1 != NULL) {
      /* Implicit mask for a Version 1 attr. */
      attrInfo->mask = HGFS_ATTR_VALID_TYPE |
                       HGFS_ATTR_VALID_SIZE |
                       HGFS_ATTR_VALID_ACCESS_TIME |
                       HGFS_ATTR_VALID_WRITE_TIME |
                       HGFS_ATTR_VALID_CHANGE_TIME |
                       HGFS_ATTR_VALID_OWNER_PERMS |
                       HGFS_ATTR_VALID_EFFECTIVE_PERMS;
      attrInfo->type = attrV1->type;
      attrInfo->size = attrV1->size;
      attrInfo->accessTime = attrV1->accessTime;
      attrInfo->writeTime = attrV1->writeTime;
      attrInfo->attrChangeTime = attrV1->attrChangeTime;
      attrInfo->ownerPerms = attrV1->permissions;
      attrInfo->effectivePerms = attrV1->permissions;
   }

   return 0;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsCalcBlockSize --
 *
 *    Calculate the number of 512 byte blocks used.
 *
 *    Round the size to the next whole block and divide by the block size
 *    to get the number of 512 byte blocks.
 *    Note, this is taken from the nfs client and is simply performing:
 *    (size + 512-1)/ 512)
 *
 * Results:
 *    The number of 512 byte blocks for the size.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 17)
static inline blkcnt_t
HgfsCalcBlockSize(uint64 tsize)
{
   blkcnt_t used = (tsize + 511) >> 9;
   /* Clamp so a huge 64-bit size cannot wrap the block count. */
   return (used > ULONG_MAX) ? ULONG_MAX : used;
}
#else
static inline unsigned long
HgfsCalcBlockSize(uint64 tsize)
{
   loff_t used = (tsize + 511) >> 9;
   return (used > ULONG_MAX) ? ULONG_MAX : used;
}
#endif


/*
 * Compare two timespecs, ordering by seconds then nanoseconds.
 * Returns <0, 0, >0 like the kernel's timespec_compare().
 */
static inline int
hgfs_timespec_compare(const struct timespec *lhs,
                      const struct timespec *rhs)
{
   if (lhs->tv_sec < rhs->tv_sec)
      return -1;
   if (lhs->tv_sec > rhs->tv_sec)
      return 1;
   return lhs->tv_nsec - rhs->tv_nsec;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsSetInodeUidGid --
 *
 *    Set the UID and GID of the inode.
 *
 *    Update an inode's UID and GID to match those of the HgfsAttr returned
 *    by the server.
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

void
HgfsSetInodeUidGid(struct inode *inode,       // IN/OUT: Inode
                   HgfsSuperInfo *si,         // IN: New attrs
                   HgfsAttrInfo const *attr)  // IN: New attrs
{
   /*
    * Use the stored uid and gid if we were given them at mount-time, or if
    * the server didn't give us a uid or gid.
    */
   if ((si->mntFlags & HGFS_MNT_SET_UID) != 0 ||
       (attr->mask & HGFS_ATTR_VALID_USERID) == 0) {
      inode->i_uid = si->uid;
   } else {
      kuid_t attrUid = make_kuid(&init_user_ns, attr->userId);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
      /* Fall back to the mount uid if the server uid doesn't map. */
      if (uid_valid(attrUid)) {
         inode->i_uid = attrUid;
      } else {
         inode->i_uid = si->uid;
      }
#else
      inode->i_uid = attrUid;
#endif
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: inode uid %u\n",
              __func__, from_kuid(&init_user_ns, inode->i_uid)));
   }

   if ((si->mntFlags & HGFS_MNT_SET_GID) != 0 ||
       (attr->mask & HGFS_ATTR_VALID_GROUPID) == 0) {
      inode->i_gid = si->gid;
   } else {
      kgid_t attrGid = make_kgid(&init_user_ns, attr->groupId);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 5, 0)
      if (gid_valid(attrGid)) {
         inode->i_gid = attrGid;
      } else {
         inode->i_gid = si->gid;
      }
#else
      inode->i_gid = attrGid;
#endif
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: inode gid %u\n",
              __func__, from_kgid(&init_user_ns, inode->i_gid)));
   }
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsIsInodeWritable --
 *
 *    Helper function for verifying if a file is under write access.
 *
 * Results:
 *    TRUE if file is writable, FALSE otherwise.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static Bool
HgfsIsInodeWritable(struct inode *inode)  // IN: File we're writing to
{
   HgfsInodeInfo *iinfo;
   struct list_head *cur;
   Bool isWritable = FALSE;

   iinfo = INODE_GET_II_P(inode);
   /*
    * Iterate over the open handles for this inode, and find if there
    * is one that allows the write mode.
    * Note, the mode is stored as incremented by one to prevent overload of
    * the zero value.
    */
   spin_lock(&hgfsBigLock);
   list_for_each(cur, &iinfo->files) {
      HgfsFileInfo *finfo = list_entry(cur, HgfsFileInfo, list);

      if (0 != (finfo->mode & (HGFS_OPEN_MODE_WRITE_ONLY + 1))) {
         isWritable = TRUE;
         break;
      }
   }
   spin_unlock(&hgfsBigLock);

   return isWritable;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsIsSafeToChange --
 *
 *    Helper function for verifying if a file inode size and time fields is safe
 *    to update. It is deemed safe only if there is not an open writer to the file.
 *
 * Results:
 *    TRUE if safe to change inode, FALSE otherwise.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static Bool
HgfsIsSafeToChange(struct inode *inode)  // IN: File we're writing to
{
   return !HgfsIsInodeWritable(inode);
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsChangeFileAttributes --
 *
 *    Update an inode's attributes to match those of the HgfsAttr. May
 *    cause dirty pages to be flushed, and may invalidate cached pages,
 *    if there was a change in the file size or modification time in
 *    the server.
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

void
HgfsChangeFileAttributes(struct inode *inode,       // IN/OUT: Inode
                         HgfsAttrInfo const *attr)  // IN: New attrs
{
   HgfsSuperInfo *si;
   HgfsInodeInfo *iinfo;
   loff_t fileSize = 0;
   Bool fileSizeChanged = FALSE;
   Bool needInvalidate = FALSE;
   Bool isSafeToChange;

   ASSERT(inode);
   ASSERT(inode->i_sb);
   ASSERT(attr);

   si = HGFS_SB_TO_COMMON(inode->i_sb);
   iinfo = INODE_GET_II_P(inode);

   /*
    * We do not want to update the file size from server or invalidate the inode
    * for inodes open for write. We need to avoid races with the write page
    * extending the file. This also will cause the server to possibly update the
    * server side file's mod time too. For those situations we do not want to blindly
    * go and invalidate the inode pages thus losing changes in flight and corrupting the
    * file.
    * We only need to invalidate the inode pages if the file has truly been modified
    * on the server side by another server side application, not by our writes.
    * If there are no writers it is safe to assume that newer mod time means the file
    * changed on the server side underneath us.
    */
   isSafeToChange = HgfsIsSafeToChange(inode);

   spin_lock(&inode->i_lock);

   iinfo = INODE_GET_II_P(inode);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: entered\n"));

   HgfsSetFileType(inode, attr);

   /*
    * Set the access mode. For hosts that don't give us group or other
    * bits (Windows), we use the owner bits in their stead.
    */
   inode->i_mode &= ~S_IALLUGO;
   if (attr->mask & HGFS_ATTR_VALID_SPECIAL_PERMS) {
      inode->i_mode |= (attr->specialPerms << 9);
   }
   if (attr->mask & HGFS_ATTR_VALID_OWNER_PERMS) {
      inode->i_mode |= (attr->ownerPerms << 6);
   }
   if (attr->mask & HGFS_ATTR_VALID_GROUP_PERMS) {
      inode->i_mode |= (attr->groupPerms << 3);
   } else {
      /* No group bits from the server: mirror the owner bits. */
      inode->i_mode |= ((inode->i_mode & S_IRWXU) >> 3);
   }
   if (attr->mask & HGFS_ATTR_VALID_OTHER_PERMS) {
      inode->i_mode |= (attr->otherPerms);
   } else {
      inode->i_mode |= ((inode->i_mode & S_IRWXU) >> 6);
   }

   /* Mask the access mode. */
   switch (attr->type) {
   case HGFS_FILE_TYPE_REGULAR:
      inode->i_mode &= ~si->fmask;
      break;
   case HGFS_FILE_TYPE_DIRECTORY:
      inode->i_mode &= ~si->dmask;
      break;
   default:
      /* Nothing else gets masked. */
      break;
   }

   /*
    * This field is used to represent the number of hard links. If the file is
    * really a file, this is easy; our filesystem doesn't support hard-linking,
    * so we just set it to 1. If the field is a directory, the number of links
    * represents the number of subdirectories, including '.' and "..".
    *
    * In either case, what we're doing isn't ideal. We've carefully tracked the
    * number of links through calls to HgfsMkdir and HgfsDelete, and now some
    * revalidate will make us trample on the number of links. But we have no
    * choice: someone on the server may have made our local view of the number
    * of links inconsistent (by, say, removing a directory) , and without the
    * ability to retrieve nlink via getattr, we have no way of knowing that.
    *
    * XXX: So in the future, adding nlink to getattr would be nice. At that
    * point we may as well just implement hard links anyway. Note that user
    * programs seem to have issues with a link count greater than 1 that isn't
    * accurate. I experimented with setting nlink to 2 for directories (to
    * account for '.' and ".."), and find printed a hard link error. So until
    * we have getattr support for nlink, everyone gets 1.
    */
   set_nlink(inode, 1);

   HgfsSetInodeUidGid(inode, si, attr);

   inode->i_rdev = 0; /* Device nodes are not supported */
#if !defined VMW_INODE_2618
   inode->i_blksize = HGFS_BLOCKSIZE;
#endif

   /*
    * Invalidate cached pages if we didn't receive the file size, or if it has
    * changed on the server, and no writes in flight.
    */
   if (attr->mask & HGFS_ATTR_VALID_SIZE) {
      fileSize = compat_i_size_read(inode);
      LOG(8, (KERN_DEBUG "VMware hgfs: %s: srv size: %"FMT64"u, inode size: %Lu\n",
              __func__, attr->size, fileSize));
      if (fileSize != attr->size) {
         /* Only adopt the server's size when no local writes are pending. */
         if (iinfo->numWbPages == 0 && isSafeToChange) {
            fileSizeChanged = needInvalidate = TRUE;
            LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: new file "
                    "size: %"FMT64"u, old file size: %Lu\n", attr->size, fileSize));
            compat_i_size_write(inode, attr->size);
         }
      }
      fileSize = compat_i_size_read(inode);

      if ((attr->mask & HGFS_ATTR_VALID_ALLOCATION_SIZE) != 0) {
         inode->i_blocks = HgfsCalcBlockSize(attr->allocSize);
      } else {
         /* No allocation size from the server; estimate from the file size. */
         uint64 allocSize = ROUNDUP(fileSize, HGFS_BLOCKSIZE);
         inode->i_blocks = HgfsCalcBlockSize(allocSize);
      }
      LOG(8, (KERN_DEBUG "VMware hgfs: %s: inode: size %Lu, blks %u\n",
              __func__, fileSize, (uint32)inode->i_blocks));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: did not "
              "get file size\n"));
   }

   if (attr->mask & HGFS_ATTR_VALID_ACCESS_TIME) {
      HGFS_SET_TIME(inode->i_atime, attr->accessTime);
   } else {
      HGFS_SET_TIME(inode->i_atime, HGFS_GET_CURRENT_TIME());
   }

   /*
    * Invalidate cached pages if we didn't receive the modification time, or if
    * it has changed on the server and we don't have writes in flight and any open
    * open writers.
    */
   if (attr->mask & HGFS_ATTR_VALID_WRITE_TIME) {
      HGFS_DECLARE_TIME(newTime);
      HGFS_SET_TIME(newTime, attr->writeTime);
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: server mod "
              "time: %ld:%lu, inode mod time: %ld:%lu\n", __func__,
              HGFS_PRINT_TIME(newTime), HGFS_PRINT_TIME(inode->i_mtime)));
      if (hgfs_timespec_compare(&newTime, &inode->i_mtime) > 0 &&
          iinfo->numWbPages == 0 &&
          isSafeToChange) {
         LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: new mod "
                 "time: %ld:%lu, old mod time: %ld:%lu\n",
                 HGFS_PRINT_TIME(newTime), HGFS_PRINT_TIME(inode->i_mtime)));
         needInvalidate = TRUE;
      }
      HGFS_SET_TIME(inode->i_mtime, attr->writeTime);
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: did not "
              "get mod time\n"));
      HGFS_SET_TIME(inode->i_mtime, HGFS_GET_CURRENT_TIME());
   }

   /*
    * Windows doesn't know about ctime, and might send us something
    * bogus; if the ctime is invalid, use the mtime instead.
    */
   if (attr->mask & HGFS_ATTR_VALID_CHANGE_TIME) {
      if (HGFS_SET_TIME(inode->i_ctime, attr->attrChangeTime)) {
         inode->i_ctime = inode->i_mtime;
      }
   } else {
      HGFS_SET_TIME(inode->i_ctime, HGFS_GET_CURRENT_TIME());
   }

   spin_unlock(&inode->i_lock);

   /* Page-cache work must happen outside the i_lock spinlock. */
   if (fileSizeChanged) {
      HGFS_TRUNCATE_PAGE_CACHE(inode, fileSize, attr->size);
   }

   /*
    * Compare old size and write time with new size and write time. If there's
    * a difference (or if we didn't get a new size or write time), the file
    * must have been written to, and we need to invalidate our cached pages.
    */
   if (S_ISREG(inode->i_mode) && needInvalidate) {
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsChangeFileAttributes: file has "
              "changed on the server, invalidating pages.\n"));
      compat_filemap_write_and_wait(inode->i_mapping);
      if (inode->i_mapping && inode->i_mapping->nrpages != 0) {
         invalidate_inode_pages2(inode->i_mapping);
      }
   }
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsCanRetryGetattrRequest --
 *
 *    Checks the getattr request version and downgrades the global getattr
 *    version if we can.
 *
 * Results:
 *    Returns TRUE on success and downgrades the global getattr protocol version,
 *    or FALSE if no retry is possible.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

static Bool
HgfsCanRetryGetattrRequest(HgfsOp getattrOp)  // IN: getattrOp version used
{
   Bool canRetry = FALSE;

   /* Retry with older version(s). Set globally. */
   if (getattrOp == HGFS_OP_GETATTR_V3) {
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: Version 3 "
              "not supported. Falling back to version 2.\n", __func__));
      hgfsVersionGetattr = HGFS_OP_GETATTR_V2;
      canRetry = TRUE;
   } else if (getattrOp == HGFS_OP_GETATTR_V2) {
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: Version 2 "
              "not supported. Falling back to version 1.\n", __func__));
      hgfsVersionGetattr = HGFS_OP_GETATTR;
      canRetry = TRUE;
   }
   /* V1 is the floor; there is nothing older to fall back to. */
   return canRetry;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsSendGetattrRequest --
 *
 *    Send the getattr request and handle the reply.
 *
 * Results:
 *    Returns zero on success, or a negative error on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

int
HgfsSendGetattrRequest(HgfsReq *req,           // IN: getattr request
                       Bool *doRetry,          // OUT: Retry getattr request
                       Bool *allowHandleReuse, // IN/OUT: handle reuse
                       HgfsAttrInfo *attr,     // OUT: Attr to copy into
                       char **fileName)        // OUT: pointer to allocated file name
{
   int result;

   *doRetry = FALSE;

   result = HgfsSendRequest(req);
   if (result == 0) {
      HgfsStatus replyStatus = HgfsReplyStatus(req);

      result = HgfsStatusConvertToLinux(replyStatus);
      LOG(6, (KERN_DEBUG "VMware hgfs: %s: reply status %d -> %d\n",
              __func__, replyStatus, result));

      /*
       * If the getattr succeeded on the server, copy the stats
       * into the HgfsAttrInfo, otherwise return an error.
       */
      switch (result) {
      case 0:
         result = HgfsUnpackGetattrReply(req, attr, fileName);
         break;

      case -EIO:
         /*
          * Fix for bug 548177.
          * When user deletes a share, we still show that share during directory
          * enumeration to minimize user's surprise. Now when we get getattr on
          * that share server returns EIO. Linux file manager doesn't like this,
          * and it doesn't display any valid shares too. So as a workaround, we
          * remap EIO to success and create minimal fake attributes.
          */
         LOG(1, (KERN_DEBUG "Hgfs: %s: Server returned EIO on unknown file\n",
                 __func__));
         /* Create fake attributes */
         attr->mask = HGFS_ATTR_VALID_TYPE | HGFS_ATTR_VALID_SIZE;
         attr->type = HGFS_FILE_TYPE_DIRECTORY;
         attr->size = 0;
         result = 0;
         break;

      case -EBADF:
         /*
          * This can happen if we attempted a getattr by handle and the handle
          * was closed. Because we have no control over the backdoor, it's
          * possible that an attacker closed our handle, in which case the
          * driver still thinks the handle is open. So a straight-up
          * "goto retry" would cause an infinite loop. Instead, let's retry
          * with a getattr by name.
          */
         if (*allowHandleReuse) {
            *allowHandleReuse = FALSE;
            *doRetry = TRUE;
         }
         /*
          * There's no reason why the server should have sent us this error
          * when we haven't used a handle. But to prevent an infinite loop in
          * the driver, let's make sure that we don't retry again.
          */
         break;

      case -EPROTO:
         /* Retry with older version(s). Set globally. */
         if (HgfsCanRetryGetattrRequest(attr->requestType)) {
            *doRetry = TRUE;
         }
         break;

      default:
         break;
      }
   } else if (result == -EIO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: timed out\n", __func__));
   } else if (result == -EPROTO) {
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: protocol error: %d\n",
              __func__, result));
   } else {
      LOG(4, (KERN_DEBUG "VMware hgfs: %s: unknown error: %d\n",
              __func__, result));
   }

   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsPrivateGetattrCmn --
 *
 *    The common getattr request. Send a getattr request to the server
 *    for the indicated remote name, and if it succeeds copy the
 *    results of the getattr into the provided HgfsAttrInfo.
* * Results: * Returns zero on success, or a negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ static int HgfsPrivateGetattrCmn(HgfsSuperInfo *si, // IN: Super block info struct dentry *dentry, // IN: Optional dentry containing name/handle Bool allowHandleReuse, // IN: Use handle instead of name HgfsAttrInfo *attr, // OUT: Attr to copy into char **fileName) // OUT: Optional file name { HgfsReq *req; HgfsOp opUsed; int result = 0; Bool doRetry; ASSERT(si != NULL); ASSERT(attr != NULL); req = HgfsGetNewRequest(); if (!req) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: out of memory " "while getting new request\n", __func__)); result = -ENOMEM; goto out; } retry: opUsed = hgfsVersionGetattr; result = HgfsPackGetattrRequestInt(req, opUsed, allowHandleReuse, si, dentry, attr); if (result != 0) { LOG(4, (KERN_DEBUG "VMware hgfs: %s: no attrs\n", __func__)); goto out; } result = HgfsSendGetattrRequest(req, &doRetry, &allowHandleReuse, attr, fileName); if (0 != result && doRetry) { goto retry; } out: HgfsFreeRequest(req); return result; } /* *---------------------------------------------------------------------- * * HgfsPrivateGetattrRoot -- * * The getattr for the root. Send a getattr request to the server * for the indicated remote name, and if it succeeds copy the * results of the getattr into the provided HgfsAttrInfo. * * fileName (of the root) will be set to a newly allocated string. * * Results: * Returns zero on success, or a negative error on failure. 
* * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsPrivateGetattrRoot(struct super_block *sb, // IN: Super block object HgfsAttrInfo *attr) // OUT: Attr to copy into { HgfsSuperInfo *si; ASSERT(sb != NULL); si = HGFS_SB_TO_COMMON(sb); return HgfsPrivateGetattrCmn(si, NULL, FALSE, attr, NULL); } /* *---------------------------------------------------------------------- * * HgfsPrivateGetattr -- * * Internal getattr routine. Send a getattr request to the server * for the indicated remote name, and if it succeeds copy the * results of the getattr into the provided HgfsAttrInfo. * * fileName (if supplied) will be set to a newly allocated string * if the file is a symlink; it's the caller's duty to free it. * * Results: * Returns zero on success, or a negative error on failure. * * Side effects: * None * *---------------------------------------------------------------------- */ int HgfsPrivateGetattr(struct dentry *dentry, // IN: Dentry containing name HgfsAttrInfo *attr, // OUT: Attr to copy into char **fileName) // OUT: pointer to allocated file name { HgfsSuperInfo *si; ASSERT(dentry); ASSERT(dentry->d_sb); si = HGFS_SB_TO_COMMON(dentry->d_sb); return HgfsPrivateGetattrCmn(si, dentry, TRUE, attr, fileName); } /* *----------------------------------------------------------------------------- * * HgfsIget -- * * Lookup or create an inode with the given attributes and remote filename. * * If an inode number of zero is specified, we'll extract an inode number * either from the attributes, or from calling iunique(). 
 *
 * Results:
 *    The inode on success
 *    NULL on failure
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

struct inode *
HgfsIget(struct super_block *sb,    // IN: Superblock of this fs
         ino_t ino,                 // IN: Inode number (optional)
         HgfsAttrInfo const *attr)  // IN: Attributes to create with
{
   HgfsInodeAttrDesc iattrDesc;
   HgfsSuperInfo *si;
   struct inode *inode;
   ino_t hashedId = ino;

   ASSERT(sb);
   ASSERT(attr);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsIget: entered\n"));

   si = HGFS_SB_TO_COMMON(sb);

   /* Initialize the descriptor to use for finding an inode. */
   iattrDesc.flags = 0;
   iattrDesc.attr = attr;

retry:
   /* No inode number? Use what's in the attributes, or call iunique(). */
   if (hashedId == 0) {
      /*
       * Try and use the server supplied ID if possible otherwise
       * failover to call iunique to generate one.
       */
      if ((si->mntFlags & HGFS_MNT_SERVER_INUM) != 0 &&
          (attr->mask & HGFS_ATTR_VALID_FILEID) != 0) {
         hashedId = HgfsUniqueidToIno(attr->hostFileId);
      } else {
         iattrDesc.flags |= HGFS_INO_DESC_INO_FAKE;
      }
   }

   /* FAKE is set either above or by a collision retry below. */
   if ((iattrDesc.flags & HGFS_INO_DESC_INO_FAKE) != 0) {
      hashedId = iunique(sb, HGFS_RESERVED_INO);
      hashedId = HgfsUniqueidToIno(hashedId);
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsIget: calling iget on inode number "
           "%lu\n", hashedId));

   /* Now try to find an inode for that number. */
   inode = HgfsGetInode(sb, hashedId, &iattrDesc);
   if (inode) {
      if ((iattrDesc.flags & HGFS_INO_DESC_INO_COLLISION) != 0) {
         /*
          * The number mapped to a different file: drop this inode and
          * retry with a locally generated (fake) inode number.
          */
         LOG(6, (KERN_DEBUG LGPFX "%s: collision using %lu\n", __func__,
                 hashedId));
         iput(inode);
         iattrDesc.flags &= ~HGFS_INO_DESC_INO_COLLISION;
         iattrDesc.flags |= HGFS_INO_DESC_INO_FAKE;
         goto retry;
      }
      if ((inode->i_state & I_NEW) != 0) {
         inode->i_ino = hashedId;
         HgfsDoReadInode(inode);
         unlock_new_inode(inode);
      }
      HgfsChangeFileAttributes(inode, attr);
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsIget: done\n"));
   return inode;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsInstantiateInode --
 *
 *    Instantiate an inode.
 *    Look up or create a new inode based on the attributes and inode number
 *    (if supplied).
 *    If an inode number of zero may be specified, in which case HgfsIget will
 *    get one from the server or, barring that, from iunique().
 *
 * Results:
 *    Zero on success, negative error otherwise.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsInstantiateInode(struct super_block *sb,     // IN: Super block
                     ino_t ino,                  // IN: Inode number (optional)
                     HgfsAttrInfo const *attr,   // IN: Attributes to use (optional)
                     struct inode **inode)       // OUT: instantiated inode
{
   int result = 0;

   ASSERT(sb != NULL);
   ASSERT(attr != NULL);
   ASSERT(inode != NULL);

   LOG(8, (KERN_DEBUG LGPFX "%s: entered\n", __func__));

   /*
    * Get the inode with this inode number and the attrs we got from
    * the server.
    */
   *inode = HgfsIget(sb, ino, attr);
   if (*inode == NULL) {
      LOG(4, (KERN_DEBUG LGPFX "%s: error getting inode\n", __func__));
      result = -ENOMEM;
   }
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsInstantiateDentry --
 *
 *    Instantiate a dentry.
 *    Associate a dentry to a looked up or created inode.
 *
 * Results:
 *    Zero on success, negative error otherwise.
 *
 * Side effects:
 *    None.
* *----------------------------------------------------------------------------- */ static int HgfsInstantiateDentry(struct inode *inode, // IN: inode Bool rootDentry, // IN: is root dentry or not struct dentry **dentry) // IN/OUT: dentry to instantiate { int result = 0; ASSERT(dentry != NULL); ASSERT(inode != NULL); LOG(8, (KERN_DEBUG LGPFX "%s: instantiating dentry\n", __func__)); if (rootDentry) { /* * Now the initialization of the inode is complete we can create * the root dentry which has flags initialized from the inode itself. */ *dentry = d_make_root(inode); if (*dentry == NULL) { LOG(4, (KERN_WARNING LGPFX "%s: error make root dentry\n", __func__)); result = -ENOMEM; goto exit; } } ASSERT(*dentry != NULL); HgfsDentryAgeReset(*dentry); (*dentry)->d_op = &HgfsDentryOperations; if (!rootDentry) { d_instantiate(*dentry, inode); } exit: return result; } /* *----------------------------------------------------------------------------- * * HgfsInstantiateRoot -- * * Gets the root dentry for a given super block. * * Results: * zero and a valid root dentry on success * negative value on failure * * Side effects: * None. 
* *----------------------------------------------------------------------------- */ int HgfsInstantiateRoot(struct super_block *sb, // IN: Super block object struct dentry **rootDentry) // OUT: Root dentry { int result; struct inode *rootInode = NULL; struct HgfsAttrInfo rootDentryAttr; ASSERT(sb != NULL); ASSERT(rootDentry != NULL); LOG(6, (KERN_DEBUG LGPFX "%s: entered\n", __func__)); *rootDentry = NULL; LOG(8, (KERN_DEBUG LGPFX "%s: retrieve root attrs\n", __func__)); result = HgfsPrivateGetattrRoot(sb, &rootDentryAttr); if (result) { LOG(4, (KERN_WARNING "VMware hgfs: %s: Could not the root attrs\n", __func__)); goto exit; } result = HgfsInstantiateInode(sb, HGFS_ROOT_INO, &rootDentryAttr, &rootInode); if (result) { LOG(6, (KERN_DEBUG LGPFX "%s: Could not get the root inode\n", __func__)); goto exit; } result = HgfsInstantiateDentry(rootInode, TRUE, rootDentry); /* * d_make_root() does iput() on failure; if d_make_root() completes * successfully then subsequent dput() will do iput() for us, so we * should just ignore root inode from now on. */ rootInode = NULL; exit: if (result) { iput(rootInode); dput(*rootDentry); } LOG(6, (KERN_DEBUG LGPFX "%s: return %d\n", __func__, result)); return result; } /* *----------------------------------------------------------------------------- * * HgfsInstantiate -- * * Tie a dentry to a looked up or created inode. Callers may choose to * supply their own attributes, or may leave attr NULL in which case the * attributes will be queried from the server. Likewise, an inode number * of zero may be specified, in which case HgfsIget will get one from the * server or, barring that, from iunique(). * * Results: * Zero on success, negative error otherwise. * * Side effects: * None. 
*
 *-----------------------------------------------------------------------------
 */

int
HgfsInstantiate(struct dentry *dentry,    // IN: Dentry to use
                ino_t ino,                // IN: Inode number (optional)
                HgfsAttrInfo const *attr) // IN: Attributes to use (optional)
{
   struct inode *inode;
   HgfsAttrInfo newAttr;
   int result;

   ASSERT(dentry);

   LOG(8, (KERN_DEBUG LGPFX "%s: entered\n", __func__));

   /* If no specified attributes, get them from the server. */
   if (attr == NULL) {
      LOG(8, (KERN_DEBUG LGPFX "%s: issuing getattr\n", __func__));
      result = HgfsPrivateGetattr(dentry, &newAttr, NULL);
      if (result) {
         goto exit;
      }
      attr = &newAttr;
   }

   /*
    * Get the inode with this inode number and the attrs we got from
    * the server.
    */
   result = HgfsInstantiateInode(dentry->d_sb, ino, attr, &inode);
   if (result) {
      goto exit;
   }

   /* Everything worked out, instantiate the dentry. */
   result = HgfsInstantiateDentry(inode, FALSE, &dentry);

exit:
   LOG(8, (KERN_DEBUG LGPFX "%s: return %d\n", __func__, result));
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsBuildFullPath --
 *
 *    Constructs the full path given the super info and optional dentry.
 *    This is a wrapper to the functions to build root path, if no dentry
 *    is supplied, and the general build path routine.
 *
 * Results:
 *    If non-negative, the length of the buffer written.
 *    Otherwise, an error code.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsBuildFullPath(char *buffer,          // IN/OUT: Buffer to write into
                  size_t bufferLen,      // IN: Size of buffer
                  HgfsSuperInfo *si,     // IN: Super block info
                  struct dentry *dentry) // IN: Optional first dentry to walk
{
   int result = 0;

   if (dentry == NULL) {
      /* No dentry so we can construct only the root name. */
      result = HgfsBuildRootPath(buffer, bufferLen, si);
   } else {
      result = HgfsBuildPath(buffer, bufferLen, dentry);
   }
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsBuildRootPath --
 *
 *    Constructs the root path given the super info.
 *
 * Results:
 *    If non-negative, the length of the buffer written.
 *    Otherwise, an error code.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

static int
HgfsBuildRootPath(char *buffer,      // IN/OUT: Buffer to write into
                  size_t bufferLen,  // IN: Size of buffer
                  HgfsSuperInfo *si) // IN: First dentry to walk
{
   size_t shortestNameLength;
   /*
    * Buffer must hold at least the share name (which is already prefixed with
    * a forward slash), and nul.
    */
   shortestNameLength = si->shareNameLen + 1;
   if (bufferLen < shortestNameLength) {
      return -ENAMETOOLONG;
   }
   /* Copies the share name including its trailing nul. */
   memcpy(buffer, si->shareName, shortestNameLength);

   LOG(4, (KERN_DEBUG "VMware hgfs: %s: root path \"%s\"\n", __func__, buffer));
   /* Returned length includes the nul terminator. */
   return shortestNameLength;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsBuildPath --
 *
 *    Constructs the full path given a dentry by walking the dentry and its
 *    parents back to the root. Adapted from d_path(), smb_build_path(), and
 *    build_path_from_dentry() implementations in Linux 2.6.16.
 *
 * Results:
 *    If non-negative, the length of the buffer written.
 *    Otherwise, an error code.
* * Side effects: * None * *----------------------------------------------------------------------------- */ int HgfsBuildPath(char *buffer, // IN/OUT: Buffer to write into size_t bufferLen, // IN: Size of buffer struct dentry *dentry) // IN: First dentry to walk { int retval; size_t shortestNameLength; HgfsSuperInfo *si; ASSERT(buffer); ASSERT(dentry); ASSERT(dentry->d_sb); si = HGFS_SB_TO_COMMON(dentry->d_sb); retval = HgfsBuildRootPath(buffer, bufferLen, si); if (0 > retval) { return retval; } /* Short-circuit if we're at the root already. */ if (IS_ROOT(dentry)) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Sending root \"%s\"\n", buffer)); return retval; } /* Skip the share name, but overwrite our previous nul. */ shortestNameLength = retval; buffer += shortestNameLength - 1; bufferLen -= shortestNameLength - 1; retval = 0; /* * Build the path string walking the tree backward from end to ROOT * and store it in reversed order. */ dget(dentry); compat_lock_dentry(dentry); while (!IS_ROOT(dentry)) { struct dentry *parent; size_t nameLen; nameLen = dentry->d_name.len; bufferLen -= nameLen + 1; if (bufferLen < 0) { compat_unlock_dentry(dentry); dput(dentry); LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Ran out of space " "while writing dentry name\n")); return -ENAMETOOLONG; } buffer[bufferLen] = '/'; memcpy(buffer + bufferLen + 1, dentry->d_name.name, nameLen); retval += nameLen + 1; parent = dentry->d_parent; dget(parent); compat_unlock_dentry(dentry); dput(dentry); dentry = parent; compat_lock_dentry(dentry); } compat_unlock_dentry(dentry); dput(dentry); if (bufferLen == 0) { LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Ran out of space while " "writing nul\n")); return -ENAMETOOLONG; } /* Shift the constructed string down to just past the share name. */ memmove(buffer, buffer + bufferLen, retval); buffer[retval] = '\0'; /* Don't forget the share name length (which also accounts for the nul). 
*/ retval += shortestNameLength; LOG(4, (KERN_DEBUG "VMware hgfs: HgfsBuildPath: Built \"%s\"\n", buffer)); return retval; } /* *----------------------------------------------------------------------------- * * HgfsDentryAgeReset -- * * Reset the age of this dentry by setting d_time to now. * * XXX: smb_renew_times from smbfs claims it is safe to reset the time of * all the parent dentries too, but how is that possible? If I stat a file * using a relative path, only that relative path will be validated. Sure, * it means that the parents still /exist/, but that doesn't mean their * attributes are up to date. * * Results: * None. * * Side effects: * None. * *----------------------------------------------------------------------------- */ void HgfsDentryAgeReset(struct dentry *dentry) // IN: Dentry whose age to reset { ASSERT(dentry); LOG(8, (KERN_DEBUG "VMware hgfs: HgfsDentryAgeReset: entered\n")); dget(dentry); compat_lock_dentry(dentry); dentry->d_time = jiffies; compat_unlock_dentry(dentry); dput(dentry); } /* *----------------------------------------------------------------------------- * * HgfsDentryAgeForce -- * * Set the dentry's time to 0. This makes the dentry's age "too old" and * forces subsequent HgfsRevalidates to go to the server for attributes. * * Results: * None. * * Side effects: * Subsequent HgfsRevalidate will not use cached attributes. * *----------------------------------------------------------------------------- */ void HgfsDentryAgeForce(struct dentry *dentry) // IN: Dentry we want to force { ASSERT(dentry); LOG(8, (KERN_DEBUG "VMware hgfs: HgfsDentryAgeForce: entered\n")); dget(dentry); compat_lock_dentry(dentry); dentry->d_time = 0; compat_unlock_dentry(dentry); dput(dentry); } /* *---------------------------------------------------------------------- * * HgfsGetOpenMode -- * * Based on the flags requested by the process making the open() * syscall, determine which open mode (access type) to request from * the server. 
*
 * Results:
 *    Returns the correct HgfsOpenMode enumeration to send to the
 *    server, or -1 on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

int
HgfsGetOpenMode(uint32 flags) // IN: Open flags
{
   /* Only the access-type bits matter here (O_RDONLY is 0). */
   uint32 mask = O_RDONLY|O_WRONLY|O_RDWR;
   int result = -1;

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: (%u) entered\n", __func__, flags));

   /*
    * Mask the flags to only look at the access type.
    */
   flags &= mask;

   /* Pick the correct HgfsOpenMode. */
   switch (flags) {
   case O_RDONLY:
      result = HGFS_OPEN_MODE_READ_ONLY;
      break;
   case O_WRONLY:
      result = HGFS_OPEN_MODE_WRITE_ONLY;
      break;
   case O_RDWR:
      result = HGFS_OPEN_MODE_READ_WRITE;
      break;
   default:
      /*
       * This should never happen, but it could if a userlevel program
       * is behaving poorly.
       */
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetOpenMode: invalid "
              "open flags %o\n", flags));
      result = -1;
      break;
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: return %d\n", __func__, result));
   return result;
}


/*
 *----------------------------------------------------------------------
 *
 * HgfsGetOpenFlags --
 *
 *    Based on the flags requested by the process making the open()
 *    syscall, determine which flags to send to the server to open the
 *    file.
 *
 * Results:
 *    Returns the correct HgfsOpenFlags enumeration to send to the
 *    server, or -1 on failure.
 *
 * Side effects:
 *    None
 *
 *----------------------------------------------------------------------
 */

int
HgfsGetOpenFlags(uint32 flags) // IN: Open flags
{
   uint32 mask = O_CREAT | O_TRUNC | O_EXCL;
   int result = -1;

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: (%u) entered\n", __func__, flags));

   /*
    * Mask the flags to only look at O_CREAT, O_EXCL, and O_TRUNC.
    */
   flags &= mask;

   /* O_EXCL has no meaning if O_CREAT is not set. */
   if (!(flags & O_CREAT)) {
      flags &= ~O_EXCL;
   }

   /* Pick the right HgfsOpenFlags. */
   switch (flags) {
   case 0:
      /* Regular open; fails if file nonexistant. */
      result = HGFS_OPEN;
      break;
   case O_CREAT:
      /* Create file; if it exists already just open it. */
      result = HGFS_OPEN_CREATE;
      break;
   case O_TRUNC:
      /* Truncate existing file; fails if nonexistant. */
      result = HGFS_OPEN_EMPTY;
      break;
   case (O_CREAT | O_EXCL):
      /* Create file; fail if it exists already. */
      result = HGFS_OPEN_CREATE_SAFE;
      break;
   case (O_CREAT | O_TRUNC):
      /* Create file; if it exists already, truncate it. */
      result = HGFS_OPEN_CREATE_EMPTY;
      break;
   default:
      /*
       * This can only happen if all three flags are set, which
       * conceptually makes no sense because O_EXCL and O_TRUNC are
       * mutually exclusive if O_CREAT is set.
       *
       * However, the open(2) man page doesn't say you can't set all
       * three flags, and certain apps (*cough* Nautilus *cough*) do
       * so. To be friendly to those apps, we just silenty drop the
       * O_TRUNC flag on the assumption that it's safer to honor
       * O_EXCL.
       */
      LOG(4, (KERN_DEBUG "VMware hgfs: HgfsGetOpenFlags: invalid open "
              "flags %o. Ignoring the O_TRUNC flag.\n", flags));
      result = HGFS_OPEN_CREATE_SAFE;
      break;
   }

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: return %d\n", __func__, result));
   return result;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsCreateFileInfo --
 *
 *    Create the HGFS-specific file information struct and store a pointer to
 *    it in the VFS file pointer. Also, link the file information struct in the
 *    inode's file list, so that we may find it when all we have is an inode
 *    (such as in writepage()).
 *
 * Results:
 *    Zero if success, non-zero if error.
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsCreateFileInfo(struct file *file,  // IN: File pointer to attach to
                   HgfsHandle handle)  // IN: Handle returned from server
{
   HgfsFileInfo *fileInfo;
   HgfsInodeInfo *inodeInfo;
   int mode;

   ASSERT(file);

   inodeInfo = INODE_GET_II_P(file->f_dentry->d_inode);
   ASSERT(inodeInfo);

   /* Get the mode of the opened file. */
   mode = HgfsGetOpenMode(file->f_flags);
   if (mode < 0) {
      return -EINVAL;
   }

   /*
    * Store the file information for this open() in the file*. This needs
    * to be freed on a close(). Note that we trim all flags from the open
    * mode and increment it so that it is guaranteed to be non-zero, because
    * callers of HgfsGetHandle may pass in zero as the desired mode if they
    * don't care about the mode of the opened handle.
    *
    * XXX: Move this into a slab allocator once HgfsFileInfo is large. One day
    * soon, the kernel will allow us to embed the vfs file into our file info,
    * like we currently do for inodes.
    */
   fileInfo = kmalloc(sizeof *fileInfo, GFP_KERNEL);
   if (!fileInfo) {
      return -ENOMEM;
   }
   fileInfo->handle = handle;
   fileInfo->mode = HGFS_OPEN_MODE_ACCMODE(mode) + 1;
   FILE_SET_FI_P(file, fileInfo);

   /* So that readdir() reissues open request */
   fileInfo->isStale = TRUE;
   fileInfo->direntPos = 0;

   /*
    * I don't think we need any VFS locks since we're only touching the HGFS
    * specific state. But we should still acquire our own lock.
    *
    * XXX: Better granularity on locks, etc.
    */
   spin_lock(&hgfsBigLock);
   list_add_tail(&fileInfo->list, &inodeInfo->files);
   spin_unlock(&hgfsBigLock);

   return 0;
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsReleaseFileInfo --
 *
 *    Release HGFS-specific file information struct created in
 *    HgfsCreateFileInfo.
 *
 * Results:
 *    None
 *
 * Side effects:
 *    None
 *
 *-----------------------------------------------------------------------------
 */

void
HgfsReleaseFileInfo(struct file *file) // IN: File pointer to detach from
{
   HgfsFileInfo *fileInfo;

   ASSERT(file);

   fileInfo = FILE_GET_FI_P(file);
   ASSERT(fileInfo);

   /* Unlink from the inode's open-files list under our global lock. */
   spin_lock(&hgfsBigLock);
   list_del_init(&fileInfo->list);
   spin_unlock(&hgfsBigLock);

   kfree(fileInfo);
   FILE_SET_FI_P(file, NULL);
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsGetHandle --
 *
 *    Retrieve an existing HGFS handle for this inode, assuming one exists.
*    The handle retrieved satisfies the mode desired by the client.
 *
 *    The desired mode does not correspond directly to HgfsOpenMode. Callers
 *    should either increment the desired HgfsOpenMode, or, if any mode will
 *    do, pass zero instead. This is in line with the Linux kernel's behavior
 *    (see do_filp_open() and open_namei() for details).
 *
 * Results:
 *    Zero on success, non-zero on error.
 *
 * Side effects:
 *    None.
 *
 *-----------------------------------------------------------------------------
 */

int
HgfsGetHandle(struct inode *inode,  // IN: Inode to search for handles
              HgfsOpenMode mode,    // IN: Mode to satisfy
              HgfsHandle *handle)   // OUT: Retrieved HGFS handle
{
   HgfsInodeInfo *iinfo;
   struct list_head *cur;
   Bool found = FALSE;

   ASSERT(handle);

   LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetHandle: desired mode %u\n", mode));

   /*
    * We may have been called from a dentry without an associated inode.
    * HgfsReadSuper is one such caller. No inode means no open files, so
    * return an error.
    */
   if (inode == NULL) {
      LOG(8, (KERN_DEBUG "VMware hgfs: HgfsGetHandle: NULL input\n"));
      return -EINVAL;
   }
   iinfo = INODE_GET_II_P(inode);

   /*
    * Unfortunately, we can't reuse handles belonging to directories. These
    * handles were created by a SearchOpen request, but the server itself
    * backed them with an artificial list of dentries populated via scandir. So
    * it can't actually use the handles for Getattr or Setattr requests, only
    * for subsequent SearchRead or SearchClose requests.
    */
   if (S_ISDIR(inode->i_mode)) {
      LOG(8, (KERN_DEBUG "VMware hgfs: HgfsGetHandle: Called on directory\n"));
      return -EINVAL;
   }

   /*
    * Iterate over the open handles for this inode, and find one that allows
    * the given mode. A desired mode of zero means "any mode will do".
    * Otherwise return an error;
    */
   spin_lock(&hgfsBigLock);
   list_for_each(cur, &iinfo->files) {
      HgfsFileInfo *finfo = list_entry(cur, HgfsFileInfo, list);

      if (mode == 0 || finfo->mode & mode) {
         *handle = finfo->handle;
         found = TRUE;
         break;
      }
   }
   spin_unlock(&hgfsBigLock);

   if (found) {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetHandle: Returning handle %d\n",
              *handle));
      return 0;
   } else {
      LOG(6, (KERN_DEBUG "VMware hgfs: HgfsGetHandle: Could not find matching "
              "handle\n"));
      return -ENOENT;
   }
}


/*
 *-----------------------------------------------------------------------------
 *
 * HgfsStatusConvertToLinux --
 *
 *    Convert a cross-platform HGFS status code to its Linux-kernel specific
 *    counterpart.
 *
 *    Rather than encapsulate the status codes within an array indexed by the
 *    various HGFS status codes, we explicitly enumerate them in a switch
 *    statement, saving the reader some time when matching HGFS status codes
 *    against Linux status codes.
 *
 * Results:
 *    Zero if the converted status code represents success, negative error
 *    otherwise. Unknown status codes are converted to the more generic
 *    "protocol error" status code to maintain forwards compatibility.
 *
 * Side effects:
 *    None.
* *----------------------------------------------------------------------------- */ int HgfsStatusConvertToLinux(HgfsStatus hgfsStatus) // IN: Status code to convert { switch (hgfsStatus) { case HGFS_STATUS_SUCCESS: return 0; case HGFS_STATUS_NO_SUCH_FILE_OR_DIR: case HGFS_STATUS_INVALID_NAME: return -ENOENT; case HGFS_STATUS_INVALID_HANDLE: return -EBADF; case HGFS_STATUS_OPERATION_NOT_PERMITTED: return -EPERM; case HGFS_STATUS_FILE_EXISTS: return -EEXIST; case HGFS_STATUS_NOT_DIRECTORY: return -ENOTDIR; case HGFS_STATUS_DIR_NOT_EMPTY: return -ENOTEMPTY; case HGFS_STATUS_PROTOCOL_ERROR: return -EPROTO; case HGFS_STATUS_ACCESS_DENIED: case HGFS_STATUS_SHARING_VIOLATION: return -EACCES; case HGFS_STATUS_NO_SPACE: return -ENOSPC; case HGFS_STATUS_OPERATION_NOT_SUPPORTED: return -EOPNOTSUPP; case HGFS_STATUS_NAME_TOO_LONG: return -ENAMETOOLONG; case HGFS_STATUS_GENERIC_ERROR: return -EIO; case HGFS_STATUS_NOT_SAME_DEVICE: return -EXDEV; default: LOG(10, (KERN_DEBUG "VMware hgfs: HgfsStatusConvertToLinux: unknown " "error: %u\n", hgfsStatus)); return -EIO; } } /* *---------------------------------------------------------------------------- * * HgfsSetUidGid -- * * Sets the uid and gid of the host file represented by the provided * dentry. * * Note that this function assumes it is being called for a file that has * been created on the host with the correct gid if the sgid bit is set for * the parent directory. That is, we treat the presence of the sgid bit in * the parent direcory's mode as an indication not to set the gid manually * ourselves here. If we did, we would clobber the gid that the host file * system chose for us automatically when the file was created. * * Also note that the sgid bit itself would have been propagated to the new * file by the host file system as well. * * Results: * None. * * Side effects: * The host file's uid and gid are modified if the hgfs server has * permission to do so. 
*
 *----------------------------------------------------------------------------
 */

void
HgfsSetUidGid(struct inode *parent,   // IN: parent inode
              struct dentry *dentry,  // IN: dentry of file to update
              kuid_t uid,             // IN: uid to set
              kgid_t gid)             // IN: gid to set
{
   struct iattr setUidGid;

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: entered \n", __func__));

   setUidGid.ia_valid = ATTR_UID;
   setUidGid.ia_uid = uid;

   /*
    * Only set the gid if the host file system wouldn't have for us. See the
    * comment in the function header.
    */
   if (!parent || !(parent->i_mode & S_ISGID)) {
      setUidGid.ia_valid |= ATTR_GID;
      setUidGid.ia_gid = gid;
   }

   /*
    * After the setattr, we desperately want a revalidate so we can
    * get the true attributes from the server. However, the setattr
    * may have done that for us. To prevent a spurious revalidate,
    * reset the dentry's time before the setattr. That way, if setattr
    * ends up revalidating the dentry, the subsequent call to
    * revalidate will do nothing.
    */
   HgfsDentryAgeForce(dentry);
   HgfsSetattr(dentry, &setUidGid);
   HgfsRevalidate(dentry);

   LOG(6, (KERN_DEBUG "VMware hgfs: %s: returns\n", __func__));
}


/*
 *----------------------------------------------------------------------------
 *
 * HgfsGetInode --
 *
 *    This function replaces iget() and should be called instead of it.
 *    HgfsGetInode() obtains an inode and, if it is a new one, initializes
 *    it calling HgfsDoReadInode().
 *
 * Results:
 *    A new inode object on success, NULL on error.
 *
 * Side effects:
 *    None.
*
 *----------------------------------------------------------------------------
 */

static struct inode *
HgfsGetInode(struct super_block *sb,       // IN: file system superblock object
             ino_t ino,                    // IN: inode number to assign to new inode
             HgfsInodeAttrDesc *iattrDesc) // IN: Attributes to create with
{
   /*
    * iget5_locked matches existing inodes via HgfsFindInode and initializes
    * brand-new ones via HgfsInitInode; iattrDesc is passed through to both.
    */
   return iget5_locked(sb, ino, HgfsFindInode, HgfsInitInode, iattrDesc);
}


/*
 *----------------------------------------------------------------------------
 *
 * HgfsFindInode --
 *
 *    This function is a callback used for comparisons between inodes.
 *
 * Results:
 *    1 on success, 0 on error.
 *
 * Side effects:
 *    None.
 *
 *----------------------------------------------------------------------------
 */

static int
HgfsFindInode(struct inode *inode, // IN: compared inode
              void *opaque)        // IN: attribute data
{
   HgfsInodeAttrDesc *iattrDesc = opaque;
   HgfsInodeInfo *iinfo = INODE_GET_II_P(inode);
   int result = 1;

   ASSERT(iattrDesc != NULL);

   if (iinfo->hostFileId != iattrDesc->attr->hostFileId) {
      /* Don't match inode with different unique Ids. */
      result = 0;
   } else if (iinfo->isFakeInodeNumber ||
              iattrDesc->attr->type == HGFS_FILE_TYPE_DIRECTORY) {
      /*
       * Deal with a collision and retry again: match (result stays 1) but
       * flag it so HgfsIget drops the inode and retries with a fake number.
       */
      iattrDesc->flags |= HGFS_INO_DESC_INO_COLLISION;
      result = 1;
   } else if (iattrDesc->attr->type != HgfsGetFileType(inode)) {
      /* Don't match inodes of different types. */
      result = 0;
   } else if (is_bad_inode(inode)) {
      result = 0;
   }
   return result;
}


/*
 *----------------------------------------------------------------------------
 *
 * HgfsInitInode --
 *
 *    This function is a callback used to initialize a new struct inode.
 *
 * Results:
 *    Zero on success always.
 *
 * Side effects:
 *    None.
*
 *----------------------------------------------------------------------------
 */

static int
HgfsInitInode(struct inode *inode, // IN: inode to init
              void *opaque)        // IN: attributes to init with
{
   HgfsInodeAttrDesc *iattrDesc = opaque;
   HgfsInodeInfo *iinfo = INODE_GET_II_P(inode);

   ASSERT(iattrDesc != NULL);

   /* Record the server-supplied unique id when one was provided. */
   if ((iattrDesc->attr->mask & HGFS_ATTR_VALID_FILEID) != 0) {
      iinfo->hostFileId = iattrDesc->attr->hostFileId;
   }
   /* Remember whether the number came from iunique() rather than the server. */
   iinfo->isFakeInodeNumber = (iattrDesc->flags & HGFS_INO_DESC_INO_FAKE) != 0;
   return 0;
}


/*
 *----------------------------------------------------------------------------
 *
 * HgfsDoReadInode --
 *
 *    A filesystem wide function that is called to initialize a new inode.
 *    This is called from two different places depending on the kernel version.
 *    In older kernels that provide the iget() interface, this function is
 *    called by the kernel as part of inode initialization (from
 *    HgfsDoReadInode). In newer kernels that call iget_locked(), this
 *    function is called by filesystem code to initialize the new inode.
 *
 * Results:
 *    None.
 *
 * Side effects:
 *    None.
 *
 *----------------------------------------------------------------------------
 */

static void
HgfsDoReadInode(struct inode *inode) // IN: Inode to initialize
{
   HgfsInodeInfo *iinfo = INODE_GET_II_P(inode);

   /*
    * If the vfs inode is not embedded within the HgfsInodeInfo, then we
    * haven't yet allocated the HgfsInodeInfo. Do so now.
    *
    * XXX: We could allocate with GFP_ATOMIC. But instead, we'll do a standard
    * allocation and mark the inode "bad" if the allocation fails. This'll
    * make all subsequent operations on the inode fail, which is what we want.
*/ INODE_SET_II_P(inode, iinfo); INIT_LIST_HEAD(&iinfo->files); iinfo->createdAndUnopened = FALSE; iinfo->numWbPages = 0; INIT_LIST_HEAD(&iinfo->listWbPages); } vmhgfs-only/Makefile.kernel 0000444 0000000 0000000 00000010526 13432725306 014753 0 ustar root root #!/usr/bin/make -f ########################################################## # Copyright (C) 1998-2016 VMware, Inc. All rights reserved. # # This program is free software; you can redistribute it and/or modify it # under the terms of the GNU General Public License as published by the # Free Software Foundation version 2 and no later version. # # This program is distributed in the hope that it will be useful, but # WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY # or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License # for more details. # # You should have received a copy of the GNU General Public License along # with this program; if not, write to the Free Software Foundation, Inc., # 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA # ########################################################## #### #### VMware vmhgfs Makefile to be distributed externally #### INCLUDE += -I. 
EXTRA_CFLAGS := $(CC_OPTS) $(INCLUDE) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/cachector.c, -DVMW_KMEMCR_CTOR_HAS_3_ARGS, ) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/cachector1.c, -DVMW_KMEMCR_CTOR_HAS_2_ARGS, ) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/file_operations_fsync.c, -DVMW_FSYNC_31, ) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/file_operations_flush.c, -DVMW_FLUSH_HAS_1_ARG, ) # Note: These tests are inverted EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/getsb1.c,, -DVMW_GETSB_2618) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/statfs1.c,, -DVMW_STATFS_2618) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/inode1.c,, -DVMW_INODE_2618) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/dcount.c,, -DVMW_DCOUNT_311) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/dalias.c,, -DVMW_DALIAS_319) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/dalias1.c,, -DVMW_DALIAS_319) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/truncate_pagecache.c,, -DVMW_PAGECACHE_312) EXTRA_CFLAGS += $(call vm_check_build, $(AUTOCONF_DIR)/wait_on_bit.c,, -DVMW_WAITONBIT_317) obj-m += $(DRIVER).o $(DRIVER)-y := $(subst $(SRCROOT)/, , $(patsubst %.c, %.o, $(wildcard $(SRCROOT)/*.c))) # # In open-vm-tools, need to compile the common sources from the lib directory. 
# VMHGFS_PATH := $(shell cd $(SRCROOT) && pwd) ifdef OVT_SOURCE_DIR LIBBACKDOOR_PATH := $(call VMLIB_PATH,backdoor) LIBHGFS_PATH := $(call VMLIB_PATH,hgfs) LIBHGFSBD_PATH := $(call VMLIB_PATH,hgfsBd) LIBMESSAGE_PATH := $(call VMLIB_PATH,message) LIBRPCOUT_PATH := $(call VMLIB_PATH,rpcOut) STUBS_PATH := $(OVT_SOURCE_DIR)/modules/linux/shared INCLUDE += -I$(LIBBACKDOOR_PATH) INCLUDE += -I$(LIBHGFS_PATH) LIBBACKDOOR := backdoor.o LIBBACKDOOR += backdoorGcc32.o LIBBACKDOOR += backdoorGcc64.o LIBHGFS := cpName.o LIBHGFS += cpNameLinux.o LIBHGFS += cpNameLite.o LIBHGFS += hgfsEscape.o LIBHGFS += hgfsUtil.o LIBHGFSBD := hgfsBd.o LIBMESSAGE := message.o LIBRPCOUT := rpcout.o $(addprefix $(VMHGFS_PATH)/,$(LIBBACKDOOR)): $(VMHGFS_PATH)/%.o: $(LIBBACKDOOR_PATH)/%.c $(Q)$(rule_cc_o_c) $(addprefix $(VMHGFS_PATH)/,$(LIBHGFS)): $(VMHGFS_PATH)/%.o: $(LIBHGFS_PATH)/%.c $(Q)$(rule_cc_o_c) $(addprefix $(VMHGFS_PATH)/,$(LIBHGFSBD)): $(VMHGFS_PATH)/%.o: $(LIBHGFSBD_PATH)/%.c $(Q)$(rule_cc_o_c) $(addprefix $(VMHGFS_PATH)/,$(LIBMESSAGE)): $(VMHGFS_PATH)/%.o: $(LIBMESSAGE_PATH)/%.c $(Q)$(rule_cc_o_c) $(addprefix $(VMHGFS_PATH)/,$(LIBRPCOUT)): $(VMHGFS_PATH)/%.o: $(LIBRPCOUT_PATH)/%.c $(Q)$(rule_cc_o_c) $(DRIVER)-y += $(LIBBACKDOOR) $(DRIVER)-y += $(LIBHGFS) $(DRIVER)-y += $(LIBHGFSBD) $(DRIVER)-y += $(LIBMESSAGE) $(DRIVER)-y += $(LIBRPCOUT) else STUBS_PATH := $(VMHGFS_PATH)/shared endif STUBS := kernelStubsLinux.o $(DRIVER)-y += $(STUBS) $(addprefix $(VMHGFS_PATH)/,$(STUBS)): $(VMHGFS_PATH)/%.o: $(STUBS_PATH)/%.c $(Q)$(rule_cc_o_c) # # On a 32-bit machine, strip out 64-bit backdoor code, and vice versa. 
# ifeq ($(CONFIG_X86_64),y) $(DRIVER)-y := $(filter-out backdoorGcc32.o, $($(DRIVER)-y)) else $(DRIVER)-y := $(filter-out backdoorGcc64.o, $($(DRIVER)-y)) endif clean: rm -rf $(wildcard $(DRIVER).mod.c $(DRIVER).ko .tmp_versions \ Module.symvers Modules.symvers Module.markers modules.order \ $(foreach dir,./,$(addprefix $(dir),.*.cmd .*.o.flags *.o))) vmhgfs-only/cpName.c 0000444 0000000 0000000 00000031443 13432725346 013410 0 ustar root root /********************************************************* * Copyright (C) 1998-2016 VMware, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation version 2 and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * *********************************************************/ /* * cpName.c -- * * Shared portions of cross-platform name conversion routines used * by hgfs. [bac] * */ #ifdef sun #include <string.h> #endif #include "cpName.h" #include "cpNameInt.h" #include "vm_assert.h" #include "hgfsEscape.h" /* *---------------------------------------------------------------------- * * CPName_GetComponent -- * * Get the next component of the CP name. * * Returns the length of the component starting with the begin * pointer, and a pointer to the next component in the buffer, if * any. The "next" pointer is set to "end" if there is no next * component. * * Results: * length (not including NUL termination) >= 0 of next * component on success. 
* error < 0 on failure (invalid component). * * Side effects: * None * *---------------------------------------------------------------------- */ int CPName_GetComponent(char const *begin, // IN: Beginning of buffer char const *end, // IN: End of buffer char const **next) // OUT: Start of next component { char const *walk; char const *myNext; size_t len; ASSERT(begin); ASSERT(end); ASSERT(next); ASSERT(begin <= end); for (walk = begin; ; walk++) { if (walk == end) { /* End of buffer. No NUL was found */ myNext = end; break; } if (*walk == '\0') { /* Found a NUL */ if (walk == begin) { Log("%s: error: first char can't be NUL\n", __FUNCTION__); return -1; } myNext = walk + 1; /* Skip consecutive path delimiters. */ while ((*myNext == '\0') && (myNext != end)) { myNext++; } if (myNext == end) { /* Last character in the buffer is not allowed to be NUL */ Log("%s: error: last char can't be NUL\n", __FUNCTION__); return -1; } break; } } len = walk - begin; *next = myNext; return (int) len; } /* *---------------------------------------------------------------------- * * CPNameEscapeAndConvertFrom -- * * Converts a cross-platform name representation into a string for * use in the local filesystem. * Escapes illegal characters as a part of convertion. * This is a cross-platform implementation and takes the path separator * argument as an argument. The path separator is prepended before each * additional path component, so this function never adds a trailing path * separator. * * Results: * 0 on success. * error < 0 on failure (the converted string did not fit in * the buffer provided or the input was invalid). 
* * Side effects: * None * *---------------------------------------------------------------------- */ int CPNameEscapeAndConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut, // IN/OUT: Output buffer char pathSep) // IN: Path separator character { int result; int inputSize; inputSize = HgfsEscape_GetSize(*bufIn, *inSize); if (inputSize < 0) { result = -1; } else if (inputSize != 0) { char *savedBufOut = *bufOut; char const *savedOutConst = savedBufOut; size_t savedOutSize = *outSize; if (inputSize > *outSize) { Log("%s: error: not enough room for escaping\n", __FUNCTION__); return -1; } /* Leaving space for the leading path separator, thus output to savedBufOut + 1. */ *inSize = HgfsEscape_Do(*bufIn, *inSize, savedOutSize, savedBufOut + 1); result = CPNameConvertFrom(&savedOutConst, inSize, outSize, bufOut, pathSep); *bufIn += *inSize; *inSize = 0; } else { result = CPNameConvertFrom(bufIn, inSize, outSize, bufOut, pathSep); } return result; } /* *---------------------------------------------------------------------- * * CPNameConvertFrom -- * * Converts a cross-platform name representation into a string for * use in the local filesystem. This is a cross-platform * implementation and takes the path separator argument as an * argument. The path separator is prepended before each additional * path component, so this function never adds a trailing path * separator. * * Results: * 0 on success. * error < 0 on failure (the converted string did not fit in * the buffer provided or the input was invalid). 
* * Side effects: * None * *---------------------------------------------------------------------- */ int CPNameConvertFrom(char const **bufIn, // IN/OUT: Input to convert size_t *inSize, // IN/OUT: Size of input size_t *outSize, // IN/OUT: Size of output buffer char **bufOut, // IN/OUT: Output buffer char pathSep) // IN: Path separator character { char const *in; char const *inEnd; size_t myOutSize; char *out; Bool inPlaceConvertion = (*bufIn == *bufOut); ASSERT(bufIn); ASSERT(inSize); ASSERT(outSize); ASSERT(bufOut); in = *bufIn; if (inPlaceConvertion) { in++; // Skip place for the leading path separator. } inEnd = in + *inSize; myOutSize = *outSize; out = *bufOut; for (;;) { char const *next; int len; int newLen; len = CPName_GetComponent(in, inEnd, &next); if (len < 0) { Log("%s: error: get next component failed\n", __FUNCTION__); return len; } /* Bug 27926 - preventing escaping from shared folder. */ if ((len == 1 && *in == '.') || (len == 2 && in[0] == '.' && in[1] == '.')) { Log("%s: error: found dot/dotdot\n", __FUNCTION__); return -1; } if (len == 0) { /* No more component */ break; } newLen = ((int) myOutSize) - len - 1; if (newLen < 0) { Log("%s: error: not enough room\n", __FUNCTION__); return -1; } myOutSize = (size_t) newLen; *out++ = pathSep; if (!inPlaceConvertion) { memcpy(out, in, len); } out += len; in = next; } /* NUL terminate */ if (myOutSize < 1) { Log("%s: error: not enough room\n", __FUNCTION__); return -1; } *out = '\0'; /* Path name size should not require more than 4 bytes. */ ASSERT((in - *bufIn) <= 0xFFFFFFFF); /* Update pointers. */ *inSize -= (in - *bufIn); *outSize = myOutSize; *bufIn = in; *bufOut = out; return 0; } /* *---------------------------------------------------------------------------- * * CPName_Print -- * * Converts a CPName formatted string to a valid, NUL-terminated string by * replacing all embedded NUL characters with '|'. * * Results: * Pointer to a static buffer containing the converted string. 
* * Side effects: * None. * *---------------------------------------------------------------------------- */ char const * CPName_Print(char const *in, // IN: Name to print size_t size) // IN: Size of name { /* Static so it does not go on a kernel stack --hpreg */ static char out[128]; size_t i; ASSERT(in); ASSERT(sizeof out >= 4); if (size > sizeof out - 1) { size = sizeof out - 4; out[size] = '.'; out[size + 1] = '.'; out[size + 2] = '.'; out[size + 3] = '\0'; } else { out[size] = '\0'; } for (i = 0; i < size; i++) { out[i] = in[i] != '\0' ? in[i] : '|'; } return out; } /* *---------------------------------------------------------------------------- * * CPName_LinuxConvertTo -- * * Wrapper function that calls CPNameConvertTo() with the correct arguments * for Linux path conversions. * * Makes a cross-platform name representation from the Linux path input * string and writes it into the output buffer. * * Results: * On success, returns the number of bytes used in the cross-platform name, * NOT including the final terminating NUL character. On failure, returns * a negative error. * * Side effects: * None. * *---------------------------------------------------------------------------- */ int CPName_LinuxConvertTo(char const *nameIn, // IN: Buf to convert size_t bufOutSize, // IN: Size of the output buffer char *bufOut) // OUT: Output buffer { return CPNameConvertTo(nameIn, bufOutSize, bufOut, '/'); } /* *---------------------------------------------------------------------------- * * CPName_WindowsConvertTo -- * * Wrapper function that calls CPNameConvertTo() with the correct arguments * for Windows path conversions. * * Makes a cross-platform name representation from the Linux path input * string and writes it into the output buffer. * * Results: * On success, returns the number of bytes used in the cross-platform name, * NOT including the final terminating NUL character. On failure, returns * a negative error. * * Side effects: * None. 
* *---------------------------------------------------------------------------- */ int CPName_WindowsConvertTo(char const *nameIn, // IN: Buf to convert size_t bufOutSize, // IN: Size of the output buffer char *bufOut) // OUT: Output buffer { return CPNameConvertTo(nameIn, bufOutSize, bufOut, '\\'); } /* *---------------------------------------------------------------------- * * CPNameConvertTo -- * * Makes a cross-platform name representation from the input string * and writes it into the output buffer. * HGFS convention is to echange names between guest and host in uescaped form. * Both ends perform necessary name escaping according to its own rules * to avoid presenitng invalid file names to OS. Thus the name needs to be unescaped * as a part of conversion to host-independent format. * * Results: * On success, returns the number of bytes used in the * cross-platform name, NOT including the final terminating NUL * character. On failure, returns a negative error. * * Side effects: * None * *---------------------------------------------------------------------- */ int CPNameConvertTo(char const *nameIn, // IN: Buf to convert size_t bufOutSize, // IN: Size of the output buffer char *bufOut, // OUT: Output buffer char pathSep) // IN: path separator to use { char *origOut = bufOut; char const *endOut = bufOut + bufOutSize; size_t cpNameLength = 0; ASSERT(nameIn); ASSERT(bufOut); /* Skip any path separators at the beginning of the input string */ while (*nameIn == pathSep) { nameIn++; } /* * Copy the string to the output buf, converting all path separators into '\0'. * Collapse multiple consecutive path separators into a single one since * CPName_GetComponent can't handle consecutive path separators. */ while (*nameIn != '\0' && bufOut < endOut) { if (*nameIn == pathSep) { *bufOut = '\0'; do { nameIn++; } while (*nameIn == pathSep); } else { *bufOut = *nameIn; nameIn++; } bufOut++; } /* * NUL terminate. XXX This should go away. 
* * When we get rid of NUL termination here, this test should * also change to "if (*nameIn != '\0')". */ if (bufOut == endOut) { return -1; } *bufOut = '\0'; /* Path name size should not require more than 4 bytes. */ ASSERT((bufOut - origOut) <= 0xFFFFFFFF); /* If there were any trailing path separators, dont count them [krishnan] */ cpNameLength = bufOut - origOut; while ((cpNameLength >= 1) && (origOut[cpNameLength - 1] == 0)) { cpNameLength--; } cpNameLength = HgfsEscape_Undo(origOut, cpNameLength); /* Return number of bytes used */ return (int) cpNameLength; }