[v8,1/7] x86/vmware: Move common macros to vmware.h

Message ID 20240422225656.10309-2-alexey.makhalov@broadcom.com
State Superseded
Series VMware hypercalls enhancements

Commit Message

Alexey Makhalov April 22, 2024, 10:56 p.m. UTC
Move VMware hypercall macros to vmware.h. This is a prerequisite for
the introduction of vmware_hypercall API. No functional changes besides
exporting vmware_hypercall_mode symbol.

Signed-off-by: Alexey Makhalov <alexey.makhalov@broadcom.com>
Reviewed-by: Nadav Amit <nadav.amit@gmail.com>
---
 arch/x86/include/asm/vmware.h | 72 +++++++++++++++++++++++++++++------
 arch/x86/kernel/cpu/vmware.c  | 57 +++------------------------
 2 files changed, 66 insertions(+), 63 deletions(-)

Comments

Borislav Petkov April 24, 2024, 4:06 p.m. UTC | #1
On Mon, Apr 22, 2024 at 03:56:50PM -0700, Alexey Makhalov wrote:
> Move VMware hypercall macros to vmware.h. This is a prerequisite for
> the introduction of vmware_hypercall API. No functional changes besides
> exporting vmware_hypercall_mode symbol.

Well, I see more.

So code movement patches should be done this way:

* first patch: sole code movement, no changes whatsoever

* follow-on patches: add changes and explain them

Because... (follow me down)...

> @@ -476,8 +431,8 @@ static bool __init vmware_legacy_x2apic_available(void)
>  {
>  	uint32_t eax, ebx, ecx, edx;
>  	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
> -	return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
> -		(eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
> +	return !(eax & BIT(VCPU_RESERVED)) &&
> +		(eax & BIT(VCPU_LEGACY_X2APIC));

... what is that change for?

Those bit definitions are clearly vmware-specific. So why are you
changing them to something generic-ish?

In any case, this patch needs to be split as outlined above.

Thx.
Alexey Makhalov April 24, 2024, 11:12 p.m. UTC | #2
On 4/24/24 9:06 AM, Borislav Petkov wrote:
> On Mon, Apr 22, 2024 at 03:56:50PM -0700, Alexey Makhalov wrote:
>> Move VMware hypercall macros to vmware.h. This is a prerequisite for
>> the introduction of vmware_hypercall API. No functional changes besides
>> exporting vmware_hypercall_mode symbol.
> 
> Well, I see more.
> 
> So code movement patches should be done this way:
> 
> * first patch: sole code movement, no changes whatsoever
> 
> * follow-on patches: add changes and explain them
> 
> Because... (follow me down)...
> 
>> @@ -476,8 +431,8 @@ static bool __init vmware_legacy_x2apic_available(void)
>>   {
>>   	uint32_t eax, ebx, ecx, edx;
>>   	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
>> -	return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
>> -		(eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
>> +	return !(eax & BIT(VCPU_RESERVED)) &&
>> +		(eax & BIT(VCPU_LEGACY_X2APIC));
> 
> ... what is that change for?
> 
> Those bit definitions are clearly vmware-specific. So why are you
> changing them to something generic-ish?
> 
> In any case, this patch needs to be split as outlined above.

Thanks for the prompt review. The concern is valid.
I've split this patch into 2 pieces:
1. Macro renaming - to use the proper GETVCPU_INFO_ prefix instead of
the incorrect VMWARE_CMD_ one (see the rough sketch below).
2. Code movement - the original idea of the patch.
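
Roughly, the renaming piece will look something like this (illustrative
only; the exact form may differ in the next revision):

/* before: misleadingly prefixed as commands, holding bare bit numbers */
#define VMWARE_CMD_LEGACY_X2APIC  3
#define VMWARE_CMD_VCPU_RESERVED 31

/* after: prefixed with the owning command, BIT() folded into the macro */
#define GETVCPU_INFO_LEGACY_X2APIC           BIT(3)
#define GETVCPU_INFO_VCPU_RESERVED           BIT(31)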

The remaining patches will stay intact.

Thanks,
--Alexey
Borislav Petkov April 25, 2024, 3:21 p.m. UTC | #3
On Wed, Apr 24, 2024 at 04:14:06PM -0700, Alexey Makhalov wrote:
> VCPU_RESERVED and LEGACY_X2APIC are not VMware hypercall commands.
> These are bits in return value of VMWARE_CMD_GETVCPU_INFO command.
> Change VMWARE_CMD_ prefix to GETVCPU_INFO_ one. And move bit-shift
> operation to the macro body.

I don't understand:

$ git grep GETVCPU_INFO
arch/x86/kernel/cpu/vmware.c:51:#define VMWARE_CMD_GETVCPU_INFO  68
arch/x86/kernel/cpu/vmware.c:478:       VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);

so that's a VMWARE_CMD 68, at least the prefix says so.

And those two are *bits* in that eax which that hypercall returns.

Or are those two bits generic but defined in a vmware-specific
hypercall?

Hm.
Alexey Makhalov April 25, 2024, 5:27 p.m. UTC | #4
On 4/25/24 8:21 AM, Borislav Petkov wrote:
> On Wed, Apr 24, 2024 at 04:14:06PM -0700, Alexey Makhalov wrote:
>> VCPU_RESERVED and LEGACY_X2APIC are not VMware hypercall commands.
>> These are bits in return value of VMWARE_CMD_GETVCPU_INFO command.
>> Change VMWARE_CMD_ prefix to GETVCPU_INFO_ one. And move bit-shift
>> operation to the macro body.
> 
> I don't understand:
> 
> $ git grep GETVCPU_INFO
> arch/x86/kernel/cpu/vmware.c:51:#define VMWARE_CMD_GETVCPU_INFO  68
> arch/x86/kernel/cpu/vmware.c:478:       VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
> 
> so that's a VMWARE_CMD 68, at least the prefix says so.
> 
> And those two are *bits* in that eax which that hypercall returns.
> 
> Or are those two bits generic but defined in a vmware-specific
> hypercall?
> 
> Hm.
> 

These are VMware hypercall commands:
#define VMWARE_CMD_GETVERSION    10
#define VMWARE_CMD_GETHZ         45
#define VMWARE_CMD_GETVCPU_INFO  68
#define VMWARE_CMD_STEALCLOCK    91


These are VMware-specific macros for analyzing the return values of the
corresponding commands. They are prefixed with the command name.
#define GETVCPU_INFO_LEGACY_X2APIC           BIT(3)
#define GETVCPU_INFO_VCPU_RESERVED           BIT(31)

#define STEALCLOCK_NOT_AVAILABLE (-1)
#define STEALCLOCK_DISABLED        0
#define STEALCLOCK_ENABLED         1


The name VMWARE_CMD_LEGACY_X2APIC was not correct, as LEGACY_X2APIC is
not a command but the meaning of the 3rd bit of the return value of
VMWARE_CMD_GETVCPU_INFO. So, change it to GETVCPU_INFO_LEGACY_X2APIC.
The same change applies to GETVCPU_INFO_VCPU_RESERVED.
Neither of these bits is generic.
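
With the bit shifts folded into the macros, the caller in
vmware_legacy_x2apic_available() would then read roughly like this
(illustrative sketch; the final form is whatever lands in the next
revision):

	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
	return !(eax & GETVCPU_INFO_VCPU_RESERVED) &&
		(eax & GETVCPU_INFO_LEGACY_X2APIC);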

--Alexey

Patch

diff --git a/arch/x86/include/asm/vmware.h b/arch/x86/include/asm/vmware.h
index ac9fc51e2b18..de2533337611 100644
--- a/arch/x86/include/asm/vmware.h
+++ b/arch/x86/include/asm/vmware.h
@@ -8,25 +8,34 @@ 
 
 /*
  * The hypercall definitions differ in the low word of the %edx argument
- * in the following way: the old port base interface uses the port
- * number to distinguish between high- and low bandwidth versions.
+ * in the following way: the old I/O port based interface uses the port
+ * number to distinguish between high- and low bandwidth versions, and
+ * uses IN/OUT instructions to define transfer direction.
  *
  * The new vmcall interface instead uses a set of flags to select
  * bandwidth mode and transfer direction. The flags should be loaded
  * into %dx by any user and are automatically replaced by the port
- * number if the VMWARE_HYPERVISOR_PORT method is used.
- *
- * In short, new driver code should strictly use the new definition of
- * %dx content.
+ * number if the I/O port method is used.
  */
 
-/* Old port-based version */
-#define VMWARE_HYPERVISOR_PORT    0x5658
-#define VMWARE_HYPERVISOR_PORT_HB 0x5659
+#define VMWARE_HYPERVISOR_HB		BIT(0)
+#define VMWARE_HYPERVISOR_OUT		BIT(1)
+
+#define VMWARE_HYPERVISOR_PORT		0x5658
+#define VMWARE_HYPERVISOR_PORT_HB	(VMWARE_HYPERVISOR_PORT | \
+					 VMWARE_HYPERVISOR_HB)
+
+#define VMWARE_HYPERVISOR_MAGIC		0x564d5868U
+
+#define VMWARE_CMD_GETVERSION		10
+#define VMWARE_CMD_GETHZ		45
+#define VMWARE_CMD_GETVCPU_INFO		68
+#define VMWARE_CMD_STEALCLOCK		91
+
+#define CPUID_VMWARE_FEATURES_ECX_VMMCALL	BIT(0)
+#define CPUID_VMWARE_FEATURES_ECX_VMCALL	BIT(1)
 
-/* Current vmcall / vmmcall version */
-#define VMWARE_HYPERVISOR_HB   BIT(0)
-#define VMWARE_HYPERVISOR_OUT  BIT(1)
+extern u8 vmware_hypercall_mode;
 
 /* The low bandwidth call. The low word of edx is presumed clear. */
 #define VMWARE_HYPERCALL						\
@@ -54,4 +63,43 @@ 
 		      "rep insb",					\
 		      "vmcall", X86_FEATURE_VMCALL,			\
 		      "vmmcall", X86_FEATURE_VMW_VMMCALL)
+
+#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
+	__asm__("inl (%%dx), %%eax" :					\
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :		\
+		"memory")
+
+#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)				\
+	__asm__("vmcall" :						\
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(0), "b"(UINT_MAX) :					\
+		"memory")
+
+#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)				\
+	__asm__("vmmcall" :						\
+		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
+		"a"(VMWARE_HYPERVISOR_MAGIC),				\
+		"c"(VMWARE_CMD_##cmd),					\
+		"d"(0), "b"(UINT_MAX) :					\
+		"memory")
+
+#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {		\
+	switch (vmware_hypercall_mode) {			\
+	case CPUID_VMWARE_FEATURES_ECX_VMCALL:			\
+		VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);		\
+		break;						\
+	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:			\
+		VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);	\
+		break;						\
+	default:						\
+		VMWARE_PORT(cmd, eax, ebx, ecx, edx);		\
+		break;						\
+	}							\
+	} while (0)
+
 #endif
diff --git a/arch/x86/kernel/cpu/vmware.c b/arch/x86/kernel/cpu/vmware.c
index 11f83d07925e..4db8e1daa4a1 100644
--- a/arch/x86/kernel/cpu/vmware.c
+++ b/arch/x86/kernel/cpu/vmware.c
@@ -41,60 +41,14 @@ 
 
 #define CPUID_VMWARE_INFO_LEAF               0x40000000
 #define CPUID_VMWARE_FEATURES_LEAF           0x40000010
-#define CPUID_VMWARE_FEATURES_ECX_VMMCALL    BIT(0)
-#define CPUID_VMWARE_FEATURES_ECX_VMCALL     BIT(1)
 
-#define VMWARE_HYPERVISOR_MAGIC	0x564D5868
-
-#define VMWARE_CMD_GETVERSION    10
-#define VMWARE_CMD_GETHZ         45
-#define VMWARE_CMD_GETVCPU_INFO  68
-#define VMWARE_CMD_LEGACY_X2APIC  3
-#define VMWARE_CMD_VCPU_RESERVED 31
-#define VMWARE_CMD_STEALCLOCK    91
+#define VCPU_LEGACY_X2APIC	3
+#define VCPU_RESERVED		31
 
 #define STEALCLOCK_NOT_AVAILABLE (-1)
 #define STEALCLOCK_DISABLED        0
 #define STEALCLOCK_ENABLED         1
 
-#define VMWARE_PORT(cmd, eax, ebx, ecx, edx)				\
-	__asm__("inl (%%dx), %%eax" :					\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(VMWARE_HYPERVISOR_PORT), "b"(UINT_MAX) :		\
-		"memory")
-
-#define VMWARE_VMCALL(cmd, eax, ebx, ecx, edx)				\
-	__asm__("vmcall" :						\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(0), "b"(UINT_MAX) :					\
-		"memory")
-
-#define VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx)                         \
-	__asm__("vmmcall" :						\
-		"=a"(eax), "=c"(ecx), "=d"(edx), "=b"(ebx) :		\
-		"a"(VMWARE_HYPERVISOR_MAGIC),				\
-		"c"(VMWARE_CMD_##cmd),					\
-		"d"(0), "b"(UINT_MAX) :					\
-		"memory")
-
-#define VMWARE_CMD(cmd, eax, ebx, ecx, edx) do {		\
-	switch (vmware_hypercall_mode) {			\
-	case CPUID_VMWARE_FEATURES_ECX_VMCALL:			\
-		VMWARE_VMCALL(cmd, eax, ebx, ecx, edx);		\
-		break;						\
-	case CPUID_VMWARE_FEATURES_ECX_VMMCALL:			\
-		VMWARE_VMMCALL(cmd, eax, ebx, ecx, edx);	\
-		break;						\
-	default:						\
-		VMWARE_PORT(cmd, eax, ebx, ecx, edx);		\
-		break;						\
-	}							\
-	} while (0)
-
 struct vmware_steal_time {
 	union {
 		uint64_t clock;	/* stolen time counter in units of vtsc */
@@ -108,7 +62,8 @@  struct vmware_steal_time {
 };
 
 static unsigned long vmware_tsc_khz __ro_after_init;
-static u8 vmware_hypercall_mode     __ro_after_init;
+u8 vmware_hypercall_mode __ro_after_init;
+EXPORT_SYMBOL_GPL(vmware_hypercall_mode);
 
 static inline int __vmware_platform(void)
 {
@@ -476,8 +431,8 @@  static bool __init vmware_legacy_x2apic_available(void)
 {
 	uint32_t eax, ebx, ecx, edx;
 	VMWARE_CMD(GETVCPU_INFO, eax, ebx, ecx, edx);
-	return !(eax & BIT(VMWARE_CMD_VCPU_RESERVED)) &&
-		(eax & BIT(VMWARE_CMD_LEGACY_X2APIC));
+	return !(eax & BIT(VCPU_RESERVED)) &&
+		(eax & BIT(VCPU_LEGACY_X2APIC));
 }
 
 #ifdef CONFIG_AMD_MEM_ENCRYPT