Add an ioctl that performs ENCLS[EINIT], which locks down the measurement
and initializes the enclave for entrance. After this, new pages can no
longer be added.
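
For illustration, a minimal userspace sketch of driving the new ioctl.
SGX_IOC_ENCLAVE_INIT and struct sgx_enclave_init come from this patch;
the helper name, the fd and the SIGSTRUCT buffer are hypothetical and
would come from the earlier SGX_IOC_ENCLAVE_CREATE and
SGX_IOC_ENCLAVE_ADD_PAGES steps:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <asm/sgx.h>

  static int enclave_init(int enclave_fd, const void *sigstruct_buf)
  {
          struct sgx_enclave_init einit = {
                  .sigstruct = (__u64)(uintptr_t)sigstruct_buf,
          };

          /* 0 on success, -1 with errno set on failure. */
          return ioctl(enclave_fd, SGX_IOC_ENCLAVE_INIT, &einit);
  }
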
Acked-by: Jethro Beekman <[email protected]>
Tested-by: Jethro Beekman <[email protected]>
Tested-by: Haitao Huang <[email protected]>
Tested-by: Chunyang Hui <[email protected]>
Tested-by: Jordan Hand <[email protected]>
Tested-by: Nathaniel McCallum <[email protected]>
Tested-by: Seth Moore <[email protected]>
Tested-by: Darren Kenny <[email protected]>
Reviewed-by: Darren Kenny <[email protected]>
Co-developed-by: Sean Christopherson <[email protected]>
Signed-off-by: Sean Christopherson <[email protected]>
Co-developed-by: Suresh Siddha <[email protected]>
Signed-off-by: Suresh Siddha <[email protected]>
Signed-off-by: Jarkko Sakkinen <[email protected]>
---
arch/x86/include/uapi/asm/sgx.h | 11 ++
arch/x86/kernel/cpu/sgx/encl.h | 2 +
arch/x86/kernel/cpu/sgx/ioctl.c | 195 ++++++++++++++++++++++++++++++++
3 files changed, 208 insertions(+)
diff --git a/arch/x86/include/uapi/asm/sgx.h b/arch/x86/include/uapi/asm/sgx.h
index c42a2ad3ca0b..7729730d8580 100644
--- a/arch/x86/include/uapi/asm/sgx.h
+++ b/arch/x86/include/uapi/asm/sgx.h
@@ -23,6 +23,8 @@ enum sgx_page_flags {
_IOW(SGX_MAGIC, 0x00, struct sgx_enclave_create)
#define SGX_IOC_ENCLAVE_ADD_PAGES \
_IOWR(SGX_MAGIC, 0x01, struct sgx_enclave_add_pages)
+#define SGX_IOC_ENCLAVE_INIT \
+ _IOW(SGX_MAGIC, 0x02, struct sgx_enclave_init)
/**
* struct sgx_enclave_create - parameter structure for the
@@ -50,4 +52,13 @@ struct sgx_enclave_add_pages {
__u64 flags;
};
+/**
+ * struct sgx_enclave_init - parameter structure for the
+ * %SGX_IOC_ENCLAVE_INIT ioctl
+ * @sigstruct: address for the SIGSTRUCT data
+ */
+struct sgx_enclave_init {
+ __u64 sigstruct;
+};
+
#endif /* _UAPI_ASM_X86_SGX_H */
diff --git a/arch/x86/kernel/cpu/sgx/encl.h b/arch/x86/kernel/cpu/sgx/encl.h
index 8ff445476657..0448d22d3010 100644
--- a/arch/x86/kernel/cpu/sgx/encl.h
+++ b/arch/x86/kernel/cpu/sgx/encl.h
@@ -70,6 +70,8 @@ struct sgx_encl {
struct xarray page_array;
struct sgx_encl_page secs;
cpumask_t cpumask;
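+	/* SECS.ATTRIBUTES, and the set of attribute bits allowed at EINIT */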
+ unsigned long attributes;
+ unsigned long attributes_mask;
};
extern const struct vm_operations_struct sgx_vm_ops;
diff --git a/arch/x86/kernel/cpu/sgx/ioctl.c b/arch/x86/kernel/cpu/sgx/ioctl.c
index 5dcced1df259..51d9b24379ff 100644
--- a/arch/x86/kernel/cpu/sgx/ioctl.c
+++ b/arch/x86/kernel/cpu/sgx/ioctl.c
@@ -16,6 +16,9 @@
#include "encl.h"
#include "encls.h"
+/* A per-cpu cache for the last known values of IA32_SGXLEPUBKEYHASHx MSRs. */
+static DEFINE_PER_CPU(u64 [4], sgx_lepubkeyhash_cache);
+
static u32 sgx_calc_ssa_frame_size(u32 miscselect, u64 xfrm)
{
u32 size_max = PAGE_SIZE;
@@ -127,6 +130,9 @@ static int sgx_encl_create(struct sgx_encl *encl, struct sgx_secs *secs)
encl->base = secs->base;
encl->size = secs->size;
encl->ssaframesize = secs->ssa_frame_size;
+ encl->attributes = secs->attributes;
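+	/* SGX_ATTR_PROVISIONKEY is deliberately left out of the allowed mask. */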
+ encl->attributes_mask = SGX_ATTR_DEBUG | SGX_ATTR_MODE64BIT |
+ SGX_ATTR_KSS;
/*
* Set SGX_ENCL_CREATED only after the enclave is fully prepped. This
@@ -477,6 +483,192 @@ static long sgx_ioc_enclave_add_pages(struct sgx_encl *encl, void __user *arg)
return c;
}
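+/* MRSIGNER is the SHA-256 digest of the signing key's modulus. */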
+static int __sgx_get_key_hash(struct crypto_shash *tfm, const void *modulus,
+ void *hash)
+{
+ SHASH_DESC_ON_STACK(shash, tfm);
+
+ shash->tfm = tfm;
+
+ return crypto_shash_digest(shash, modulus, SGX_MODULUS_SIZE, hash);
+}
+
+static int sgx_get_key_hash(const void *modulus, void *hash)
+{
+ struct crypto_shash *tfm;
+ int ret;
+
+ tfm = crypto_alloc_shash("sha256", 0, CRYPTO_ALG_ASYNC);
+ if (IS_ERR(tfm))
+ return PTR_ERR(tfm);
+
+ ret = __sgx_get_key_hash(tfm, modulus, hash);
+
+ crypto_free_shash(tfm);
+ return ret;
+}
+
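+/*
+ * Write the IA32_SGXLEPUBKEYHASHx MSRs and refresh the per-cpu cache.
+ * MSRs whose cached value already matches are skipped, unless @enforce
+ * is set, in which case all four MSRs are rewritten unconditionally.
+ */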
+static void sgx_update_lepubkeyhash_msrs(u64 *lepubkeyhash, bool enforce)
+{
+ u64 *cache;
+ int i;
+
+ cache = per_cpu(sgx_lepubkeyhash_cache, smp_processor_id());
+ for (i = 0; i < 4; i++) {
+ if (enforce || (lepubkeyhash[i] != cache[i])) {
+ wrmsrl(MSR_IA32_SGXLEPUBKEYHASH0 + i, lepubkeyhash[i]);
+ cache[i] = lepubkeyhash[i];
+ }
+ }
+}
+
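+/*
+ * Execute ENCLS[EINIT] with preemption disabled so that the pubkey hash
+ * MSR writes and EINIT run on the same CPU.  If EINIT fails with
+ * SGX_INVALID_EINITTOKEN, the cached MSR values are assumed stale: the
+ * MSRs are rewritten unconditionally and EINIT is retried once.
+ */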
+static int sgx_einit(struct sgx_sigstruct *sigstruct, void *token,
+ struct sgx_epc_page *secs, u64 *lepubkeyhash)
+{
+ int ret;
+
+ preempt_disable();
+ sgx_update_lepubkeyhash_msrs(lepubkeyhash, false);
+ ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+ if (ret == SGX_INVALID_EINITTOKEN) {
+ sgx_update_lepubkeyhash_msrs(lepubkeyhash, true);
+ ret = __einit(sigstruct, token, sgx_get_epc_addr(secs));
+ }
+ preempt_enable();
+ return ret;
+}
+
+static int sgx_encl_init(struct sgx_encl *encl, struct sgx_sigstruct *sigstruct,
+ void *token)
+{
+ u64 mrsigner[4];
+ int ret;
+ int i;
+ int j;
+
+	/*
+	 * Deny initializing enclaves with attributes (namely provisioning)
+	 * that have not been explicitly allowed.
+	 */
+ if (encl->attributes & ~encl->attributes_mask)
+ return -EACCES;
+
+ ret = sgx_get_key_hash(sigstruct->modulus, mrsigner);
+ if (ret)
+ return ret;
+
+ mutex_lock(&encl->lock);
+
+ /*
+ * ENCLS[EINIT] is interruptible because it has such a high latency,
+ * e.g. 50k+ cycles on success. If an IRQ/NMI/SMI becomes pending,
+ * EINIT may fail with SGX_UNMASKED_EVENT so that the event can be
+ * serviced.
+ */
+ for (i = 0; i < SGX_EINIT_SLEEP_COUNT; i++) {
+ for (j = 0; j < SGX_EINIT_SPIN_COUNT; j++) {
+ ret = sgx_einit(sigstruct, token, encl->secs.epc_page,
+ mrsigner);
+			if (ret != SGX_UNMASKED_EVENT)
+				break;
+ }
+
+ if (ret != SGX_UNMASKED_EVENT)
+ break;
+
+ msleep_interruptible(SGX_EINIT_SLEEP_TIME);
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ goto err_out;
+ }
+ }
+
+ if (ret & ENCLS_FAULT_FLAG) {
+ if (encls_failed(ret))
+ ENCLS_WARN(ret, "EINIT");
+
+ sgx_encl_destroy(encl);
+ ret = -EFAULT;
+ } else if (ret) {
+ pr_debug("EINIT returned %d\n", ret);
+ ret = -EPERM;
+ } else {
+ atomic_or(SGX_ENCL_INITIALIZED, &encl->flags);
+ }
+
+err_out:
+ mutex_unlock(&encl->lock);
+ return ret;
+}
+
+/**
+ * sgx_ioc_enclave_init - handler for %SGX_IOC_ENCLAVE_INIT
+ *
+ * @encl: an enclave pointer
+ * @arg: userspace pointer to a struct sgx_enclave_init instance
+ *
+ * Flush any outstanding enqueued EADD operations and perform EINIT. The
+ * Launch Enclave Public Key Hash MSRs are rewritten as necessary to match
+ * the enclave's MRSIGNER, which is calculated from the provided sigstruct.
+ *
+ * Return:
+ * 0 on success,
+ *   -EPERM if EINIT fails (the raw SGX error code is never returned),
+ * -errno otherwise
+ */
+static long sgx_ioc_enclave_init(struct sgx_encl *encl, void __user *arg)
+{
+ struct sgx_sigstruct *sigstruct;
+ struct sgx_enclave_init einit;
+ struct page *initp_page;
+ void *token;
+ int ret;
+
+ if ((atomic_read(&encl->flags) & SGX_ENCL_INITIALIZED) ||
+ !(atomic_read(&encl->flags) & SGX_ENCL_CREATED))
+ return -EINVAL;
+
+ if (copy_from_user(&einit, arg, sizeof(einit)))
+ return -EFAULT;
+
+ initp_page = alloc_page(GFP_KERNEL);
+ if (!initp_page)
+ return -ENOMEM;
+
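+	/*
+	 * SIGSTRUCT and the launch token share the page: SIGSTRUCT occupies
+	 * the first half and an all-zero token the second, which satisfies
+	 * EINIT's alignment requirements for both.
+	 */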
+ sigstruct = kmap(initp_page);
+ token = (void *)((unsigned long)sigstruct + PAGE_SIZE / 2);
+ memset(token, 0, SGX_LAUNCH_TOKEN_SIZE);
+
+ if (copy_from_user(sigstruct, (void __user *)einit.sigstruct,
+ sizeof(*sigstruct))) {
+ ret = -EFAULT;
+ goto out;
+ }
+
+	/*
+	 * SIGSTRUCT.VENDOR is a legacy field used with Intel-signed enclaves:
+	 * 0x0000 and 0x8086 used to denote regular and architectural enclaves,
+	 * respectively. The CPU accepts only these two values, and they carry
+	 * no other meaning.
+	 *
+	 * Thus, reject any other value.
+	 */
+ if (sigstruct->header.vendor != 0x0000 &&
+ sigstruct->header.vendor != 0x8086) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = sgx_encl_init(encl, sigstruct, token);
+
+out:
+ kunmap(initp_page);
+ __free_page(initp_page);
+ return ret;
+}
+
long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
struct sgx_encl *encl = filep->private_data;
@@ -498,6 +690,9 @@ long sgx_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
case SGX_IOC_ENCLAVE_ADD_PAGES:
ret = sgx_ioc_enclave_add_pages(encl, (void __user *)arg);
break;
+ case SGX_IOC_ENCLAVE_INIT:
+ ret = sgx_ioc_enclave_init(encl, (void __user *)arg);
+ break;
default:
ret = -ENOIOCTLCMD;
break;
--
2.25.1