[v14,06/22] KVM: SEV: Add KVM_SEV_SNP_LAUNCH_UPDATE command

Message ID 20240421180122.1650812-7-michael.roth@amd.com
State New
Series Add AMD Secure Nested Paging (SEV-SNP) Hypervisor Support

Commit Message

Michael Roth April 21, 2024, 6:01 p.m. UTC
From: Brijesh Singh <brijesh.singh@amd.com>

A key aspect of launching an SNP guest is initializing it with a
known/measured payload, which is encrypted into guest memory as
pre-validated private pages and measured into the cryptographic launch
context created with KVM_SEV_SNP_LAUNCH_START, so that the guest can
attest itself after booting.

Since all private pages are provided by guest_memfd, make use of the
kvm_gmem_populate() interface to handle this. The general flow is that
guest_memfd will handle allocating the pages associated with the GPA
ranges being initialized by each call of KVM_SEV_SNP_LAUNCH_UPDATE and
copying data from userspace into those pages; the post_populate
callback then does the work of setting the RMP entries for these pages
to private and issuing the SNP firmware calls to encrypt/measure them.

For more information see the SEV-SNP specification.
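
For illustration only (not part of this patch), userspace is expected
to drive the command in a loop along the lines of the sketch below;
vm_fd/sev_fd/gpa/payload are placeholders and error handling is
minimal:

  #include <err.h>
  #include <errno.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  static void snp_launch_update_range(int vm_fd, int sev_fd, __u64 gpa,
                                      void *payload, __u64 len, __u8 type)
  {
          struct kvm_sev_snp_launch_update update = {
                  .gfn_start = gpa >> 12,         /* GPA to GFN */
                  .uaddr = (__u64)(unsigned long)payload,
                  .len = len,                     /* must be 4K-aligned */
                  .type = type,
          };

          /* KVM advances gfn_start/uaddr/len past the pages it processed. */
          while (update.len) {
                  struct kvm_sev_cmd cmd = {
                          .id = KVM_SEV_SNP_LAUNCH_UPDATE,
                          .data = (__u64)(unsigned long)&update,
                          .sev_fd = sev_fd,
                  };

                  if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_OP, &cmd) && errno != EAGAIN)
                          err(1, "SNP_LAUNCH_UPDATE (fw_error %u)", cmd.error);
          }
  }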

Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Co-developed-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
Signed-off-by: Ashish Kalra <ashish.kalra@amd.com>
---
 .../virt/kvm/x86/amd-memory-encryption.rst    |  54 ++++
 arch/x86/include/uapi/asm/kvm.h               |  19 ++
 arch/x86/kvm/svm/sev.c                        | 237 ++++++++++++++++++
 3 files changed, 310 insertions(+)

Comments

Sean Christopherson April 24, 2024, 11:58 p.m. UTC | #1
On Sun, Apr 21, 2024, Michael Roth wrote:
> diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
> index 9d08d1202544..d3ae4ded91df 100644
> --- a/arch/x86/kvm/svm/sev.c
> +++ b/arch/x86/kvm/svm/sev.c
> @@ -258,6 +258,35 @@ static void sev_decommission(unsigned int handle)
>  	sev_guest_decommission(&decommission, NULL);
>  }
>  
> +static int snp_page_reclaim(u64 pfn)
> +{
> +	struct sev_data_snp_page_reclaim data = {0};
> +	int err, rc;
> +
> +	data.paddr = __sme_set(pfn << PAGE_SHIFT);
> +	rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
> +	if (WARN_ON_ONCE(rc)) {

This now WARNs here *and* in the caller, which is overkill.  Pick one.
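
E.g. (untested) let snp_page_reclaim() own the WARN, and have the
caller simply do:

	/* snp_page_reclaim() already WARNs on failure. */
	if (snp_page_reclaim(pfn + i))
		goto out_unmap;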

> +		/*
> +		 * This shouldn't happen under normal circumstances, but if the
> +		 * reclaim failed, then the page is no longer safe to use.

Explain _why_ it's unsafe.  The code makes it quite clear that reclaim shouldn't
fail.  E.g. I assume it's bound to the ASID still?  Something else?

This is all messy, too.  snp_launch_update_vmsa() does RECLAIM but doesn't
convert the page back to shared, which seems wrong.  sev_gmem_post_populate()
open codes the calls separately, and then snp_cleanup_guest_buf() bundles them
together.

Yeesh, and the return types are all mixed.  snp_cleanup_guest_buf() returns a
bool on success, but the helpers it uses return 0/-errno.  Please convert
everything to return 0/-errno, boolean "error codes" are hard to follow and make
the code unnecessarily brittle.
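
E.g. a single 0/-errno helper along these lines (untested sketch; the
name snp_reclaim_and_make_shared() is made up here) would let callers
stop open coding the pair:

	static int snp_reclaim_and_make_shared(u64 pfn)
	{
		int ret;

		/* Reclaim the page from firmware; it leaks the page on failure. */
		ret = snp_page_reclaim(pfn);
		if (ret)
			return ret;

		/* ...and only then transition it back to shared. */
		return host_rmp_make_shared(pfn, PG_LEVEL_4K);
	}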

> +	if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src)) {
> +		ret = -EINVAL;

Just return -EINVAL, no?

> +		goto out;
> +	}
> +
> +	for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
> +		struct sev_data_snp_launch_update fw_args = {0};
> +		bool assigned;
> +		void *vaddr;
> +		int level;
> +
> +		if (!kvm_mem_is_private(kvm, gfn)) {
> +			pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
> +				 __func__, gfn);
> +			ret = -EINVAL;
> +			break;
> +		}
> +
> +		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
> +		if (ret || assigned) {
> +			pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
> +				 __func__, gfn, ret, assigned);
> +			ret = -EINVAL;
> +			break;
> +		}
> +
> +		if (src) {
> +			vaddr = kmap_local_pfn(pfn + i);
> +			ret = copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE);
> +			if (ret) {
> +				pr_debug("Failed to copy source page into GFN 0x%llx\n", gfn);
> +				goto out_unmap;
> +			}
> +		}
> +
> +		ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
> +				       sev_get_asid(kvm), true);
> +		if (ret) {
> +			pr_debug("%s: Failed to mark RMP entry for GFN 0x%llx as private, ret: %d\n",
> +				 __func__, gfn, ret);
> +			goto out_unmap;
> +		}
> +
> +		n_private++;
> +
> +		fw_args.gctx_paddr = __psp_pa(sev->snp_context);
> +		fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
> +		fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
> +		fw_args.page_type = sev_populate_args->type;
> +		ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
> +				      &fw_args, &sev_populate_args->fw_error);
> +		if (ret) {
> +			pr_debug("%s: SEV-SNP launch update failed, ret: 0x%x, fw_error: 0x%x\n",
> +				 __func__, ret, sev_populate_args->fw_error);
> +
> +			if (WARN_ON_ONCE(snp_page_reclaim(pfn + i)))
> +				goto out_unmap;
> +
> +			/*
> +			 * When invalid CPUID function entries are detected,
> +			 * firmware writes the expected values into the page and
> +			 * leaves it unencrypted so it can be used for debugging
> +			 * and error-reporting.
> +			 *
> +			 * Copy this page back into the source buffer so
> +			 * userspace can use this information to determine
> +			 * which CPUID leaves/fields failed CPUID
> +			 * validation.
> +			 */
> +			if (sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
> +			    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
> +				if (WARN_ON_ONCE(host_rmp_make_shared(pfn + i, PG_LEVEL_4K)))
> +					goto out_unmap;
> +
> +				if (copy_to_user(src + i * PAGE_SIZE, vaddr,
> +						 PAGE_SIZE))
> +					pr_debug("Failed to write CPUID page back to userspace\n");
> +
> +				/* PFN is hypervisor-owned at this point, skip cleanup for it. */
> +				n_private--;

If reclaim or make_shared fails, KVM will attempt make_shared again.  And I am
not a fan of keeping vaddr mapped after making the pfn private.  Functionally
it probably changes nothing, but conceptually it's odd.  And "mapping" is free
(AFAIK).  Oh, and vaddr is subtly guaranteed to be valid based on the type, which
is fun.

So why not unmap immediately after the first copy_from_user(), and then this
can be:


		if (ret)
			goto err;

	}

	return 0;

err:
	if (!<reclaim and make shared>() &&
	    sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
	    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
		vaddr = kmap_local(...);
		copy_to_user(...);
		kunmap_local(vaddr);
	}
	for (i = 0; i < n_private - 1; i++)
		<reclaim? and make shared()>

	return ret;
	
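Filled in with this patch's helpers, that might look like the below
(illustrative only; assumes the err label is reached only after
rmp_make_private() succeeded for the current pfn, i.e. it is counted
in n_private):

err:
	if (!snp_page_reclaim(pfn + i) &&
	    !host_rmp_make_shared(pfn + i, PG_LEVEL_4K) &&
	    sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
	    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
		void *vaddr = kmap_local_pfn(pfn + i);

		if (copy_to_user(src + i * PAGE_SIZE, vaddr, PAGE_SIZE))
			pr_debug("Failed to write CPUID page back to userspace\n");
		kunmap_local(vaddr);
	}

	/* The failing pfn was handled above; unwind the earlier ones. */
	for (i = 0; i < n_private - 1; i++)
		WARN_ON_ONCE(host_rmp_make_shared(pfn + i, PG_LEVEL_4K));

	return ret;
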
> +			}
> +		}
> +
> +out_unmap:
> +		kunmap_local(vaddr);
> +		if (ret)
> +			break;
> +	}
> +
> +out:
> +	if (ret) {
> +		pr_debug("%s: exiting with error ret %d, restoring %d gmem PFNs to shared.\n",
> +			 __func__, ret, n_private);
> +		for (i = 0; i < n_private; i++)
> +			WARN_ON_ONCE(host_rmp_make_shared(pfn + i, PG_LEVEL_4K));
> +	}
> +
> +	return ret;
> +}

...

> +	src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
> +
> +	count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
> +				  sev_gmem_post_populate, &sev_populate_args);
> +	if (count < 0) {
> +		argp->error = sev_populate_args.fw_error;
> +		pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
> +			 __func__, count, argp->error);
> +		ret = -EIO;
> +	} else if (count <= npages) {

This seems like excessive paranoia.

> +		params.gfn_start += count;
> +		params.len -= count * PAGE_SIZE;
> +		if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
> +			params.uaddr += count * PAGE_SIZE;
> +
> +		ret = copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))
> +		      ? -EIO : 0;

copy_to_user() failures typically return -EFAULT.  The style is not the most
readable.  Maybe this?

		ret = 0;
		if (copy_to_user(...))
			ret = -EFAULT;
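
Combined with dropping the paranoid "count <= npages" check, the tail
could collapse to something like (untested):

	} else {
		params.gfn_start += count;
		params.len -= count * PAGE_SIZE;
		if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
			params.uaddr += count * PAGE_SIZE;

		ret = 0;
		if (copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params)))
			ret = -EFAULT;
	}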

> +	} else {
> +		WARN_ONCE(1, "Completed page count %ld exceeds requested amount %ld",
> +			  count, npages);
> +		ret = -EINVAL;
> +	}
> +
> +out:
> +	mutex_unlock(&kvm->slots_lock);
> +
> +	return ret;
> +}
> +
>  int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
>  {
>  	struct kvm_sev_cmd sev_cmd;
> @@ -2217,6 +2451,9 @@ int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
>  	case KVM_SEV_SNP_LAUNCH_START:
>  		r = snp_launch_start(kvm, &sev_cmd);
>  		break;
> +	case KVM_SEV_SNP_LAUNCH_UPDATE:
> +		r = snp_launch_update(kvm, &sev_cmd);
> +		break;
>  	default:
>  		r = -EINVAL;
>  		goto out;
> -- 
> 2.25.1
>

Patch

diff --git a/Documentation/virt/kvm/x86/amd-memory-encryption.rst b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
index d4c4a0b90bc9..60728868c5c6 100644
--- a/Documentation/virt/kvm/x86/amd-memory-encryption.rst
+++ b/Documentation/virt/kvm/x86/amd-memory-encryption.rst
@@ -483,6 +483,60 @@  Returns: 0 on success, -negative on error
 See SNP_LAUNCH_START in the SEV-SNP specification [snp-fw-abi]_ for further
 details on the input parameters in ``struct kvm_sev_snp_launch_start``.
 
+19. KVM_SEV_SNP_LAUNCH_UPDATE
+-----------------------------
+
+The KVM_SEV_SNP_LAUNCH_UPDATE command is used for loading userspace-provided
+data into a guest GPA range, measuring the contents into the SNP guest context
+created by KVM_SEV_SNP_LAUNCH_START, and then encrypting/validating that GPA
+range so that it will be immediately readable using the encryption key
+associated with the guest context once the guest is booted. At that point the
+guest can attest the measurement associated with its context before unlocking
+any secrets.
+
+It is required that the GPA ranges initialized by this command have had the
+KVM_MEMORY_ATTRIBUTE_PRIVATE attribute set in advance. See the documentation
+for KVM_SET_MEMORY_ATTRIBUTES for more details on this aspect.
+
+Upon success, this command is not guaranteed to have processed the entire
+range requested. Instead, the ``gfn_start``, ``uaddr``, and ``len`` fields of
+``struct kvm_sev_snp_launch_update`` will be updated to correspond to the
+remaining range that has yet to be processed. The caller should continue
+calling this command until those fields indicate the entire range has been
+processed, e.g. ``len`` is 0, ``gfn_start`` is equal to the last GFN in the
+range plus 1, and ``uaddr`` is the last byte of the userspace-provided source
+buffer address plus 1. In the case where ``type`` is KVM_SEV_SNP_PAGE_TYPE_ZERO,
+``uaddr`` will be ignored completely.
+
+Parameters (in): struct  kvm_sev_snp_launch_update
+
+Returns: 0 on success, < 0 on error, -EAGAIN if caller should retry
+
+::
+
+        struct kvm_sev_snp_launch_update {
+                __u64 gfn_start;        /* Guest page number to load/encrypt data into. */
+                __u64 uaddr;            /* Userspace address of data to be loaded/encrypted. */
+                __u64 len;              /* 4K-aligned length in bytes to copy into guest memory. */
+                __u8 type;              /* The type of the guest pages being initialized. */
+                __u8 pad0;
+                __u16 flags;            /* Must be zero. */
+                __u32 pad1;
+                __u64 pad2[4];
+
+        };
+
+where the allowed values for ``type`` are #define'd as::
+
+        KVM_SEV_SNP_PAGE_TYPE_NORMAL
+        KVM_SEV_SNP_PAGE_TYPE_ZERO
+        KVM_SEV_SNP_PAGE_TYPE_UNMEASURED
+        KVM_SEV_SNP_PAGE_TYPE_SECRETS
+        KVM_SEV_SNP_PAGE_TYPE_CPUID
+
+See the SEV-SNP spec [snp-fw-abi]_ for further details on how each page type is
+used/measured.
+
 Device attribute API
 ====================
 
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 5765391f0fdb..3c9255de76db 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -699,6 +699,7 @@  enum sev_cmd_id {
 
 	/* SNP-specific commands */
 	KVM_SEV_SNP_LAUNCH_START = 100,
+	KVM_SEV_SNP_LAUNCH_UPDATE,
 
 	KVM_SEV_NR_MAX,
 };
@@ -833,6 +834,24 @@  struct kvm_sev_snp_launch_start {
 	__u64 pad1[4];
 };
 
+/* Kept in sync with firmware values for simplicity. */
+#define KVM_SEV_SNP_PAGE_TYPE_NORMAL		0x1
+#define KVM_SEV_SNP_PAGE_TYPE_ZERO		0x3
+#define KVM_SEV_SNP_PAGE_TYPE_UNMEASURED	0x4
+#define KVM_SEV_SNP_PAGE_TYPE_SECRETS		0x5
+#define KVM_SEV_SNP_PAGE_TYPE_CPUID		0x6
+
+struct kvm_sev_snp_launch_update {
+	__u64 gfn_start;
+	__u64 uaddr;
+	__u64 len;
+	__u8 type;
+	__u8 pad0;
+	__u16 flags;
+	__u32 pad1;
+	__u64 pad2[4];
+};
+
 #define KVM_X2APIC_API_USE_32BIT_IDS            (1ULL << 0)
 #define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK  (1ULL << 1)
 
diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index 9d08d1202544..d3ae4ded91df 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -258,6 +258,35 @@  static void sev_decommission(unsigned int handle)
 	sev_guest_decommission(&decommission, NULL);
 }
 
+static int snp_page_reclaim(u64 pfn)
+{
+	struct sev_data_snp_page_reclaim data = {0};
+	int err, rc;
+
+	data.paddr = __sme_set(pfn << PAGE_SHIFT);
+	rc = sev_do_cmd(SEV_CMD_SNP_PAGE_RECLAIM, &data, &err);
+	if (WARN_ON_ONCE(rc)) {
+		/*
+		 * This shouldn't happen under normal circumstances, but if the
+		 * reclaim failed, then the page is no longer safe to use.
+		 */
+		snp_leak_pages(pfn, 1);
+	}
+
+	return rc;
+}
+
+static int host_rmp_make_shared(u64 pfn, enum pg_level level)
+{
+	int rc;
+
+	rc = rmp_make_shared(pfn, level);
+	if (rc)
+		snp_leak_pages(pfn, page_level_size(level) >> PAGE_SHIFT);
+
+	return rc;
+}
+
 static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
 {
 	struct sev_data_deactivate deactivate;
@@ -2118,6 +2147,211 @@  static int snp_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	return rc;
 }
 
+struct sev_gmem_populate_args {
+	__u8 type;
+	int sev_fd;
+	int fw_error;
+};
+
+static int sev_gmem_post_populate(struct kvm *kvm, gfn_t gfn_start, kvm_pfn_t pfn,
+				  void __user *src, int order, void *opaque)
+{
+	struct sev_gmem_populate_args *sev_populate_args = opaque;
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	int n_private = 0, ret, i;
+	int npages = (1 << order);
+	gfn_t gfn;
+
+	pr_debug("%s: gfn_start 0x%llx pfn_start 0x%llx npages %d\n",
+		 __func__, gfn_start, pfn, npages);
+
+	if (WARN_ON_ONCE(sev_populate_args->type != KVM_SEV_SNP_PAGE_TYPE_ZERO && !src)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	for (gfn = gfn_start, i = 0; gfn < gfn_start + npages; gfn++, i++) {
+		struct sev_data_snp_launch_update fw_args = {0};
+		bool assigned;
+		void *vaddr;
+		int level;
+
+		if (!kvm_mem_is_private(kvm, gfn)) {
+			pr_debug("%s: Failed to ensure GFN 0x%llx has private memory attribute set\n",
+				 __func__, gfn);
+			ret = -EINVAL;
+			break;
+		}
+
+		ret = snp_lookup_rmpentry((u64)pfn + i, &assigned, &level);
+		if (ret || assigned) {
+			pr_debug("%s: Failed to ensure GFN 0x%llx RMP entry is initial shared state, ret: %d assigned: %d\n",
+				 __func__, gfn, ret, assigned);
+			ret = -EINVAL;
+			break;
+		}
+
+		if (src) {
+			vaddr = kmap_local_pfn(pfn + i);
+			ret = copy_from_user(vaddr, src + i * PAGE_SIZE, PAGE_SIZE);
+			if (ret) {
+				pr_debug("Failed to copy source page into GFN 0x%llx\n", gfn);
+				goto out_unmap;
+			}
+		}
+
+		ret = rmp_make_private(pfn + i, gfn << PAGE_SHIFT, PG_LEVEL_4K,
+				       sev_get_asid(kvm), true);
+		if (ret) {
+			pr_debug("%s: Failed to mark RMP entry for GFN 0x%llx as private, ret: %d\n",
+				 __func__, gfn, ret);
+			goto out_unmap;
+		}
+
+		n_private++;
+
+		fw_args.gctx_paddr = __psp_pa(sev->snp_context);
+		fw_args.address = __sme_set(pfn_to_hpa(pfn + i));
+		fw_args.page_size = PG_LEVEL_TO_RMP(PG_LEVEL_4K);
+		fw_args.page_type = sev_populate_args->type;
+		ret = __sev_issue_cmd(sev_populate_args->sev_fd, SEV_CMD_SNP_LAUNCH_UPDATE,
+				      &fw_args, &sev_populate_args->fw_error);
+		if (ret) {
+			pr_debug("%s: SEV-SNP launch update failed, ret: 0x%x, fw_error: 0x%x\n",
+				 __func__, ret, sev_populate_args->fw_error);
+
+			if (WARN_ON_ONCE(snp_page_reclaim(pfn + i)))
+				goto out_unmap;
+
+			/*
+			 * When invalid CPUID function entries are detected,
+			 * firmware writes the expected values into the page and
+			 * leaves it unencrypted so it can be used for debugging
+			 * and error-reporting.
+			 *
+			 * Copy this page back into the source buffer so
+			 * userspace can use this information to determine
+			 * which CPUID leaves/fields failed CPUID
+			 * validation.
+			 */
+			if (sev_populate_args->type == KVM_SEV_SNP_PAGE_TYPE_CPUID &&
+			    sev_populate_args->fw_error == SEV_RET_INVALID_PARAM) {
+				if (WARN_ON_ONCE(host_rmp_make_shared(pfn + i, PG_LEVEL_4K)))
+					goto out_unmap;
+
+				if (copy_to_user(src + i * PAGE_SIZE, vaddr,
+						 PAGE_SIZE))
+					pr_debug("Failed to write CPUID page back to userspace\n");
+
+				/* PFN is hypervisor-owned at this point, skip cleanup for it. */
+				n_private--;
+			}
+		}
+
+out_unmap:
+		kunmap_local(vaddr);
+		if (ret)
+			break;
+	}
+
+out:
+	if (ret) {
+		pr_debug("%s: exiting with error ret %d, restoring %d gmem PFNs to shared.\n",
+			 __func__, ret, n_private);
+		for (i = 0; i < n_private; i++)
+			WARN_ON_ONCE(host_rmp_make_shared(pfn + i, PG_LEVEL_4K));
+	}
+
+	return ret;
+}
+
+static int snp_launch_update(struct kvm *kvm, struct kvm_sev_cmd *argp)
+{
+	struct kvm_sev_info *sev = &to_kvm_svm(kvm)->sev_info;
+	struct sev_gmem_populate_args sev_populate_args = {0};
+	struct kvm_sev_snp_launch_update params;
+	struct kvm_memory_slot *memslot;
+	long npages, count;
+	void __user *src;
+	int ret = 0;
+
+	if (!sev_snp_guest(kvm) || !sev->snp_context)
+		return -EINVAL;
+
+	if (copy_from_user(&params, u64_to_user_ptr(argp->data), sizeof(params)))
+		return -EFAULT;
+
+	pr_debug("%s: GFN start 0x%llx length 0x%llx type %d flags %d\n", __func__,
+		 params.gfn_start, params.len, params.type, params.flags);
+
+	if (!PAGE_ALIGNED(params.len) || params.flags ||
+	    (params.type != KVM_SEV_SNP_PAGE_TYPE_NORMAL &&
+	     params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO &&
+	     params.type != KVM_SEV_SNP_PAGE_TYPE_UNMEASURED &&
+	     params.type != KVM_SEV_SNP_PAGE_TYPE_SECRETS &&
+	     params.type != KVM_SEV_SNP_PAGE_TYPE_CPUID))
+		return -EINVAL;
+
+	npages = params.len / PAGE_SIZE;
+
+	/*
+	 * For each GFN that's being prepared as part of the initial guest
+	 * state, the following pre-conditions are verified:
+	 *
+	 *   1) The backing memslot is a valid private memslot.
+	 *   2) The GFN has been set to private via KVM_SET_MEMORY_ATTRIBUTES
+	 *      beforehand.
+	 *   3) The PFN of the guest_memfd has not already been set to private
+	 *      in the RMP table.
+	 *
+	 * The KVM MMU relies on kvm->mmu_invalidate_seq to retry nested page
+	 * faults if there's a race between a fault and an attribute update via
+	 * KVM_SET_MEMORY_ATTRIBUTES, and a similar approach could be utilized
+	 * here. However, kvm->slots_lock guards against both this as well as
+	 * concurrent memslot updates occurring while these checks are being
+	 * performed, so use that here to make it easier to reason about the
+	 * initial expected state and better guard against unexpected
+	 * situations.
+	 */
+	mutex_lock(&kvm->slots_lock);
+
+	memslot = gfn_to_memslot(kvm, params.gfn_start);
+	if (!kvm_slot_can_be_private(memslot)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sev_populate_args.sev_fd = argp->sev_fd;
+	sev_populate_args.type = params.type;
+	src = params.type == KVM_SEV_SNP_PAGE_TYPE_ZERO ? NULL : u64_to_user_ptr(params.uaddr);
+
+	count = kvm_gmem_populate(kvm, params.gfn_start, src, npages,
+				  sev_gmem_post_populate, &sev_populate_args);
+	if (count < 0) {
+		argp->error = sev_populate_args.fw_error;
+		pr_debug("%s: kvm_gmem_populate failed, ret %ld (fw_error %d)\n",
+			 __func__, count, argp->error);
+		ret = -EIO;
+	} else if (count <= npages) {
+		params.gfn_start += count;
+		params.len -= count * PAGE_SIZE;
+		if (params.type != KVM_SEV_SNP_PAGE_TYPE_ZERO)
+			params.uaddr += count * PAGE_SIZE;
+
+		ret = copy_to_user(u64_to_user_ptr(argp->data), &params, sizeof(params))
+		      ? -EIO : 0;
+	} else {
+		WARN_ONCE(1, "Completed page count %ld exceeds requested amount %ld",
+			  count, npages);
+		ret = -EINVAL;
+	}
+
+out:
+	mutex_unlock(&kvm->slots_lock);
+
+	return ret;
+}
+
 int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 {
 	struct kvm_sev_cmd sev_cmd;
@@ -2217,6 +2451,9 @@  int sev_mem_enc_ioctl(struct kvm *kvm, void __user *argp)
 	case KVM_SEV_SNP_LAUNCH_START:
 		r = snp_launch_start(kvm, &sev_cmd);
 		break;
+	case KVM_SEV_SNP_LAUNCH_UPDATE:
+		r = snp_launch_update(kvm, &sev_cmd);
+		break;
 	default:
 		r = -EINVAL;
 		goto out;