2020-07-16 13:57:56

by Jarkko Sakkinen

Subject: [PATCH v36 14/24] x86/sgx: Add SGX_IOC_ENCLAVE_INIT

Add an ioctl that performs the ENCLS[EINIT] function, which locks down the
enclave's measurement and initializes the enclave so that it can be entered.
After this, new pages can no longer be added to the enclave.
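
For illustration only, here is a minimal userspace sketch of how the new
ioctl is expected to be invoked. The names enclave_fd and sigstruct_buf are
hypothetical, the enclave is assumed to have already been created with
SGX_IOC_ENCLAVE_CREATE and populated with SGX_IOC_ENCLAVE_ADD_PAGES, and
error handling is omitted:

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <asm/sgx.h>    /* installed uapi header providing the ioctl ABI */

    /* Lock down and initialize an already-populated enclave. */
    static int enclave_init(int enclave_fd, const void *sigstruct_buf)
    {
            struct sgx_enclave_init init = {
                    .sigstruct = (uint64_t)(uintptr_t)sigstruct_buf,
            };

            return ioctl(enclave_fd, SGX_IOC_ENCLAVE_INIT, &init);
    }

On success the measurement is locked and the enclave can be entered; further
attempts to add pages are expected to fail.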

Acked-by: Jethro Beekman <[email protected]>
Tested-by: Jethro Beekman <[email protected]>
Tested-by: Haitao Huang <[email protected]>
Tested-by: Chunyang Hui <[email protected]>
Tested-by: Jordan Hand <[email protected]>
Tested-by: Nathaniel McCallum <[email protected]>
Tested-by: Seth Moore <[email protected]>
Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Co-developed-by: Suresh Siddha <[email protected]>
Signed-off-by: Suresh Siddha <[email protected]>
Signed-off-by: Jarkko Sakkinen <[email protected]>
---
arch/x86/include/uapi/asm/sgx.h | 11 ++
arch/x86/kernel/cpu/sgx/ioctl.c | 188 ++++++++++++++++++++++++++++++++
2 files changed, 199 insertions(+)

diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index c8f199b3fb6f..5edb08ab8fd0 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -23,6 +23,8 @@ enum sgx_page_flags {
_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
#define SGX_IOC_ENCLAVE_ADD_PAGES \
_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+ _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)

/**
* struct sgx_enclave_create - parameter structure for the
@@ -52,4 +54,13 @@ struct sgx_enclave_add_pages {
__u64 count;
};

+/**
+ * struct sgx_enclave_init - parameter structure for the
+ * %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct: address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+ __u64 sigstruct;
+};
+
#endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index c63a51362d14..3444de955191 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -16,6 +16,9 @@
#include "encl.h"
#include "encls.h"

+/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. */
+static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
+
static u32 sgx_calc_ssa_frame_size(u32 miscselect, u64 xfrm)
{
u32 size_max = PAGE_SIZE;
@@ -485,6 +488,188 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
return ret;
}

+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+ void *hash)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+ struct crypto_shash *tfm;
+ int ret;
+
+ tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
+static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
+{
+ u64 *cache;
+ int i;
+
+ cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
+ for (i = 0; i < 4; i++) {
+ if (enforce || (lepubkeyhash[i] != cache[i])) {
+ wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+ cache[i] = lepubkeyhash[i];
+ }
+ }
+}
+
+static int sgx_einit(struct sgx_sigstruct *sigstruct, void *token,
+ struct sgx_epc_page *secs, u64 *lepubkeyhash)
+{
+ int ret;
+
+ preempt_disable();
+ sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
+ ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+ if (ret == SGX_INVALID_EINITTOKEN) {
+ sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
+ ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+ }
+ preempt_enable();
+ return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+ void *token)
+{
+ u64 mrsigner[4];
+ int ret;
+ int i;
+ int j;
+
+ /* Check that the required attributes have been authorized. */
+ if (encl->secs_attributes & ~encl->allowed_attributes)
+ return -EACCES;
+
+ ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+ if (ret)
+ return ret;
+
+ mutex_lock(&encl->lock);
+
+ /*
+ * Periodically, EINIT polls for certain asynchronous events. If such an
+ * event is detected, it completes with SGX_UNMASKED_EVENT.
+ */
+ for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+ for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+ ret = sgx_einit(sigstruct, token, encl->secs.epc_page,
+ mrsigner);
+ if (ret == SGX_UNMASKED_EVENT)
+ continue;
+ else
+ break;
+ }
+
+ if (ret != SGX_UNMASKED_EVENT)
+ break;
+
+ msleep_interruptible(SGX_EINIT_SLEEP_TIME);
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto err_out;
+ }
+ }
+
+ if (ret & ENCLS_FAULT_FLAG) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EINIT");
+
+ sgx_encl_destroy(encl);
+ ret = -EFAULT;
+ } else if (ret) {
+ pr_debug("EINIT returned %d\n", ret);
+ ret = -EPERM;
+ } else {
+ atomic_or(SGX_ENCL_INITIALIZED, &encl->flags);
+ }
+
+err_out:
+ mutex_unlock(&encl->lock);
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
+ *
+ * @encl: pointer to the enclave to initialize
+ * @arg: userspace pointer to a struct sgx_enclave_init instance
+ *
+ * Flush any outstanding enqueued EADD operations and perform EINIT. The
+ * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
+ *
+ * Return:
+ * 0 on success,
+ * SGX error code on EINIT failure,
+ * -errno otherwise
+ */
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_sigstruct *sigstruct;
+ struct sgx_enclave_init einit;
+ struct page *initp_page;
+ void *token;
+ int ret;
+
+ if ((atomic_read(&encl->flags) & SGX_ENCL_INITIALIZED) ||
+ !(atomic_read(&encl->flags) & SGX_ENCL_CREATED))
+ return -EINVAL;
+
+ if (copy_from_user(&einit, arg, sizeof(einit)))
+ return -EFAULT;
+
+ initp_page = alloc_page(GFP_KERNEL);
+ if (!initp_page)
+ return -ENOMEM;
+
+ sigstruct = kmap(initp_page);
+ token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
+ memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
+
+ if (copy_from_user(sigstruct, (void __user *)einit.sigstruct,
+ sizeof(*sigstruct))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+ /*
+ * SIGSTRUCT.VENDOR is a legacy field used with Intel-signed enclaves. Its
+ * accepted values, 0x0000 and 0x8086, used to distinguish regular from
+ * architectural enclaves; the CPU accepts only them, with no other meaning.
+ *
+ * Thus, reject any other values.
+ */
+ if (sigstruct->header.vendor != 0x0000 &&
+ sigstruct->header.vendor != 0x8086) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sgx_encl_init(encl, sigstruct, token);
+
+out:
+ kunmap(initp_page);
+ __free_page(initp_page);
+ return ret;
+}
+
+
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
@@ -506,6 +691,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case SGX_IOC_ENCLAVE_ADD_PAGES:
ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_INIT:
+ ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+ break;
default:
ret = -ENOIOCTLCMD;
break;
--
2.25.1


2020-08-06 16:50:58

by Darren Kenny

Subject: Re: [PATCH v36 14/24] x86/sgx: Add SGX_IOC_ENCLAVE_INIT

On Thursday, 2020-07-16 at 16:52:53 +03, Jarkko Sakkinen wrote:
> Add an ioctl that performs the ENCLS[EINIT] function, which locks down the
> enclave's measurement and initializes the enclave so that it can be entered.
> After this, new pages can no longer be added to the enclave.
>
> Acked-by: Jethro Beekman <[email protected]>
> Tested-by: Jethro Beekman <[email protected]>
> Tested-by: Haitao Huang <[email protected]>
> Tested-by: Chunyang Hui <[email protected]>
> Tested-by: Jordan Hand <[email protected]>
> Tested-by: Nathaniel McCallum <[email protected]>
> Tested-by: Seth Moore <[email protected]>

Tested-by: Darren Kenny <[email protected]>
Reviewed-by: Darren Kenny <[email protected]>

> Co-developed-by: Sean Christopherson <[email protected]>
> Signed-off-by: Sean Christopherson <[email protected]>
> Co-developed-by: Suresh Siddha <[email protected]>
> Signed-off-by: Suresh Siddha <[email protected]>
> Signed-off-by: Jarkko Sakkinen <[email protected]>