2008-11-17 23:31:51

by Shasi Pulijala

[permalink] [raw]
Subject: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

Hi Evgeniy,

This patch (v8) adds code that prevents a synchronous call from freeing its request data when the wait for completion is interrupted, because the crypto code may still be accessing that data in parallel.
From: Shasi Pulijala <[email protected]>


Signed-off-by: Shasi Pulijala <[email protected]>
Acked-by: Loc Ho <[email protected]>
---
crypto/cryptodev.c | 1858 +++++++++++++++++++++++++++++++++++++++++++++
include/linux/cryptodev.h | 79 ++
2 files changed, 1937 insertions(+), 0 deletions(-)
create mode 100644 crypto/cryptodev.c
create mode 100644 include/linux/cryptodev.h

diff --git a/crypto/cryptodev.c b/crypto/cryptodev.c
new file mode 100644
index 0000000..a579863
--- /dev/null
+++ b/crypto/cryptodev.c
@@ -0,0 +1,1858 @@
+/**************************************************************************
+ * Linux CryptoAPI user space interface module
+ *
+ * Copyright (c) 2008 Applied Micro Circuits Corporation.
+ * All rights reserved. Shasi Pulijala <[email protected]>
+ * Loc Ho <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * @file cryptodev.c
+ *
+ * This file defines ioctl structures for the Linux CryptoAPI interface. It
+ * provides user space applications access into the Linux CryptoAPI
+ * functionalities.
+ *
+ **************************************************************************
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/pagemap.h>
+#include <linux/miscdevice.h>
+#include <linux/ioctl.h>
+#include <linux/scatterlist.h>
+#include <linux/cryptodev.h>
+#include <linux/aio.h>
+#include <linux/mutex.h>
+#include <asm/atomic.h>
+#include <crypto/aead.h>
+#include <crypto/authenc.h>
+#include <crypto/hash.h>
+#include <crypto/internal/hash.h>
+
+/* /dev/crypto is a char block device with major 10 and minor below */
+#define CRYPTODEV_MINOR 70
+
+/* Debug Mode Setting */
+#define CRYPTODEV_DEBUG
+
+/* Version Number */
+#define CRYPTODEV_VER "0.1"
+
+/*Pin Max and Min Sizes*/
+#define PAGE_PIN_MIN_SIZE 64
+#define PAGE_PIN_MAX_SIZE (48 * 1024)
+
+static int debug;
+module_param(debug, int, 0644);
+MODULE_PARM_DESC(debug, "0: normal, 1: verbose, 2: debug");
+
+static int sg_single;
+module_param(sg_single, int, 0644);
+MODULE_PARM_DESC(sg_single, "0: scatter user buffers to page size, "
+ "1: single buffer for user buffer");
+
+static int page_pin_min_size = PAGE_PIN_MIN_SIZE;
+module_param(page_pin_min_size, int, 0644);
+MODULE_PARM_DESC(page_pin_min_size,
+ "min value to decide copy to/from user or pin pages");
+
+static int page_pin_max_size = PAGE_PIN_MAX_SIZE;
+module_param(page_pin_max_size, int, 0644);
+MODULE_PARM_DESC(page_pin_max_size,
+ "max value to decide copy to/from user or pin pages");
+
+#ifdef CRYPTODEV_STATS
+static int enable_stats;
+module_param(enable_stats, int, 0644);
+/* Fixed user-visible typo: "statictics" -> "statistics" (shown by modinfo) */
+MODULE_PARM_DESC(enable_stats, "collect statistics about cryptodev usage");
+#endif
+
+#define PFX "cryptodev: "
+
+/*
+ * Debug helpers: these must expand to real code when CRYPTODEV_DEBUG is
+ * defined (it is, at the top of this file) and to no-ops otherwise.  The
+ * original test used "#ifndef", which inverted the intent and compiled
+ * all debug output out of the debug build.  Also dropped the trailing
+ * semicolon from CD_HEXDUMP so call sites' own ';' terminates it.
+ */
+#ifdef CRYPTODEV_DEBUG
+#define CD_HEXDUMP(b, l) \
+ print_hex_dump(KERN_CONT, "", DUMP_PREFIX_OFFSET, \
+ 16, 1, (b), (l), false)
+#define CDPRINTK(level, severity, format, a...) \
+ do { \
+ if (level <= debug) \
+ printk(severity PFX "%s[%u]: " format, \
+ current->comm, current->pid, ##a); \
+ } while (0)
+#else
+#define CD_HEXDUMP(b, l)
+#define CDPRINTK(level, severity, format, a...)
+#endif
+
+#define CRYPTO_MODE_NOTSET 0
+#define CRYPTO_MODE_ACIPHER 1
+#define CRYPTO_MODE_AHASH 2
+#define CRYPTO_MODE_AEAD 3
+
+/* Per-request context assembled around the user's struct crypt_op. */
+struct crypto_item_op {
+ struct crypt_op *udata; /* request copied in from user space */
+ char *iv; /* kernel copy of the IV (may be NULL) */
+ char *assoc; /* kernel copy of AEAD associated data */
+ char __user *src_data; /* user-space source buffer */
+ char __user *dst_data; /* user-space destination buffer */
+ u16 src_size; /* length of src_data in bytes */
+};
+
+/* Shorthand accessors into the copied-in user request (->udata). */
+#define iv_len udata->iv_size
+#define assoc_len udata->assoc_size
+#define eop udata->op
+
+/* Shorthand for the per-mode transform stored in struct csession. */
+#define tfm_ablkcipher crt_tfm.acipher_tfm
+#define tfm_aead crt_tfm.aead_tfm
+#define tfm_ahash crt_tfm.ahash_tfm
+
+/*
+ * One open crypto session: a transform of one of the three supported
+ * types plus the mode-specific operations bound at session creation.
+ * The last reference dropped (see refcnt) destroys the session.
+ */
+struct csession {
+ atomic_t refcnt; /* held by the fd and by each in-flight op */
+ int mode; /* See CRYPTO_MODE_XXX */
+ union {
+ struct crypto_ablkcipher *acipher_tfm;
+ struct crypto_ahash *ahash_tfm;
+ struct crypto_aead *aead_tfm;
+ } crt_tfm;
+ int (*destroy)(struct csession *ses_ptr); /* free crt_tfm */
+ int (*runop)(struct csession *ses_ptr, /* run one request */
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+ int (*getivsize)(struct csession *ses_ptr);
+ int (*setkey)(struct csession *ses_ptr, char *key, int key_size);
+
+};
+
+/* Per-open-file state: the active session, serialized by 'lock'. */
+struct cryptodev_ctx {
+ struct csession *session;
+ struct mutex lock;
+};
+
+/* Accessors for the sync/aio members of struct async_result below. */
+#define crypto_completion async.syncio.completion
+#define session_ptr async.aio.ses_ptr
+#define iocbvec async.aio.iocb
+#define nopin_data async.aio.data
+#define aio_enc async.aio.enc
+#define aio_dst_data async.aio.dst_data
+#define aio_size_data async.aio.data_size
+/*
+ * Shared state between a submitted request and its completion callback.
+ * opcnt counts the outstanding users (submitter + callback); whichever
+ * side drops it to zero releases the pinned pages and frees this object.
+ */
+struct async_result {
+ struct list_head next; /* Pending AIO requests ready for read */
+ int nr_spages; /* pinned source page count */
+ int nr_dpages; /* pinned destination page count */
+ struct page **spages; /* pinned source pages (NULL if nopin) */
+ struct page **dpages; /* pinned dest pages (may alias spages) */
+
+ char *null_buf; /* zero bounce buffer for NULL user src */
+ void *udata; /* copied-in crypt_op, freed with this */
+ atomic_t opcnt; /* see comment above */
+
+ union {
+ struct {
+ struct csession *ses_ptr;
+ struct kiocb *iocb;
+ char *data;
+ char __user *dst_data;
+ int enc;
+ size_t data_size;
+ } aio;
+ struct {
+ struct completion completion;
+ } syncio;
+ } async;
+ int err; /* final status from the completion callback */
+};
+
+static int cryptodev_run_acipher(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_ahash(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static int cryptodev_run_aead(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb);
+static void cryptodev_async_aio_complete(struct crypto_async_request *req,
+ int err);
+
+/**
+ * Synchronous handling Routine
+ *
+ */
+/* Free an async_result and the buffers it owns (null_buf, udata). */
+static void cryptodev_destroy_res(struct async_result *result)
+{
+	/* kfree(NULL) is a no-op, so the NULL guards were redundant */
+	kfree(result->null_buf);
+	kfree(result->udata);
+	kfree(result);
+}
+
+/* Tear down a session: run the mode-specific destructor, then free it. */
+static void cryptodev_destroy_session(struct csession *ses_ptr)
+{
+	int (*destroy)(struct csession *) = ses_ptr->destroy;
+
+	if (destroy)
+		destroy(ses_ptr);
+	kfree(ses_ptr);
+}
+
+/* Mark each pinned page dirty and drop its page-cache reference. */
+static void cryptodev_release_pages(struct page **pages, int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		SetPageDirty(pages[i]);
+		page_cache_release(pages[i]);
+	}
+}
+
+/*
+ * Completion callback for synchronous (non-AIO) requests.
+ *
+ * opcnt starts at 2: one reference for the submitter, one for this
+ * callback.  Whichever side drops the last reference does the cleanup,
+ * so a submitter interrupted in wait_for_completion_interruptible() can
+ * return without freeing memory the driver may still be using (this is
+ * the v8 race fix described in the changelog).  Relies on the submitter
+ * having filled in nr_spages/nr_dpages before posting the request.
+ */
+static void cryptodev_sync_complete(struct crypto_async_request *req,
+ int err)
+{
+ struct async_result *res;
+
+ CDPRINTK(2, KERN_INFO, "Synchrnous call-back Called\n");
+
+ res = req->data;
+
+ /* -EINPROGRESS only reports a backlogged request starting. */
+ if (err == -EINPROGRESS)
+ return;
+
+ res->err = err;
+ if (atomic_dec_and_test(&res->opcnt)) {
+ /* Submitter already gave up; we own the cleanup. */
+ cryptodev_release_pages(res->dpages, res->nr_dpages);
+ cryptodev_release_pages(res->spages, res->nr_spages);
+ cryptodev_destroy_res(res);
+ return;
+ }
+ complete(&(res->crypto_completion));
+}
+
+/**
+ * Destroy Alg Sessions
+ *
+ */
+int cryptodev_destroy_ablkcipher_tfm(struct csession *ses_ptr)
+{
+	struct crypto_ablkcipher *tfm = ses_ptr->tfm_ablkcipher;
+
+	CDPRINTK(1, KERN_INFO, "ABLKCIPHER sid %p deleting\n", ses_ptr);
+	crypto_free_ablkcipher(tfm);
+	return 0;
+}
+
+int cryptodev_destroy_ahash_tfm(struct csession *ses_ptr)
+{
+	struct crypto_ahash *tfm = ses_ptr->tfm_ahash;
+
+	CDPRINTK(1, KERN_INFO, "AHASH sid %p deleting\n", ses_ptr);
+	crypto_free_ahash(tfm);
+	return 0;
+}
+
+int cryptodev_destroy_aead_tfm(struct csession *ses_ptr)
+{
+	struct crypto_aead *tfm = ses_ptr->tfm_aead;
+
+	CDPRINTK(1, KERN_INFO, "AEAD sid %p deleting\n", ses_ptr);
+	crypto_free_aead(tfm);
+	return 0;
+}
+
+/**
+ * ivsize return functions
+ *
+ */
+int cryptodev_ablkcipher_getivsize(struct csession *ses_ptr)
+{
+	struct crypto_ablkcipher *tfm = ses_ptr->tfm_ablkcipher;
+
+	return crypto_ablkcipher_ivsize(tfm);
+}
+
+int cryptodev_aead_getivsize(struct csession *ses_ptr)
+{
+	struct crypto_aead *tfm = ses_ptr->tfm_aead;
+
+	return crypto_aead_ivsize(tfm);
+}
+
+int cryptodev_ahash_getivsize(struct csession *ses_ptr)
+{
+	/* Hashing takes no IV. */
+	return 0;
+}
+
+/**
+ * setkey functions
+ *
+ */
+/*
+ * Set the cipher key on an ablkcipher session.
+ * Returns 0 on success or -EINVAL if the transform rejects the key.
+ */
+int cryptodev_ablkcipher_setkey(struct csession *ses_ptr, char *key,
+ int key_size)
+{
+ int ret;
+
+ ret = crypto_ablkcipher_setkey(ses_ptr->tfm_ablkcipher,
+ key, key_size);
+ if (ret) {
+ /* key_size is an int: %d (was %zu, a format mismatch) */
+ printk(KERN_ERR PFX
+ "failed to set key for %d: flags=0x%X\n", key_size*8,
+ crypto_ablkcipher_get_flags(ses_ptr->tfm_ablkcipher));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Set the key on an AEAD session.
+ * Returns 0 on success or -EINVAL if the transform rejects the key.
+ */
+int cryptodev_aead_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret;
+
+ ret = crypto_aead_setkey(ses_ptr->tfm_aead, key, key_size);
+ if (ret) {
+ /* key_size is an int: %d (was %zu, a format mismatch) */
+ printk(KERN_ERR PFX
+ "failed to set key field for %d: flags=0x%X\n",
+ key_size * 8,
+ crypto_aead_get_flags(ses_ptr->tfm_aead));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/*
+ * Set the (HMAC) key on a hash session.  A zero key_size is a no-op
+ * because plain digests take no key.  Returns 0 or -EINVAL.
+ */
+int cryptodev_ahash_setkey(struct csession *ses_ptr, char *key, int key_size)
+{
+ int ret = 0;
+
+ if (!key_size)
+ return ret;
+ ret = crypto_ahash_setkey(ses_ptr->tfm_ahash, key, key_size);
+ if (ret) {
+ /* key_size is an int: %d (was %zu, a format mismatch) */
+ printk(KERN_ERR PFX
+ "failed to set key field for %d: flags=0x%X\n"
+ "(see CRYPTO_TFM_RES_* in "
+ "<linux/crypto.h> for details)\n",
+ key_size * 8,
+ crypto_ahash_get_flags(ses_ptr->tfm_ahash));
+
+ ret = -EINVAL;
+ }
+ return ret;
+}
+
+/**
+ * Routine for creating a session for AEAD type algorithm
+ *
+ */
+/*
+ * Build a csession around an already-allocated AEAD transform: set the
+ * key and ICV (auth tag) size, allocate the session, and bind the AEAD
+ * operation callbacks.  On any failure the tfm is freed here and an
+ * ERR_PTR is returned, so the caller must not free it again.
+ */
+struct csession *create_session_aead(struct crypto_aead *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_aead_clear_flags(tfm, ~0);
+
+ ret = crypto_aead_setkey(tfm, keyp, sop->key_size);
+ if (ret) {
+ /* NOTE(review): %zu assumes sop->key_size is size_t -
+ * confirm against struct session_op in cryptodev.h */
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size * 8,
+ crypto_aead_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> "
+ "for details)\n");
+
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ret = crypto_aead_setauthsize(tfm, sop->icv_size);
+ if (ret) {
+ printk(KERN_ERR "failed to set authsize = %u\n", sop->icv_size);
+ ret = -EINVAL;
+ goto free_aead;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_aead;
+ }
+ ses_new->tfm_aead = tfm;
+ ses_new->mode = CRYPTO_MODE_AEAD;
+ ses_new->destroy = cryptodev_destroy_aead_tfm;
+ ses_new->runop = cryptodev_run_aead;
+ ses_new->getivsize = cryptodev_aead_getivsize;
+ ses_new->setkey = cryptodev_aead_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AEAD sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_aead:
+ crypto_free_aead(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Routine for creating a session for HASH type algorithm
+ *
+ */
+/*
+ * Build a csession around an already-allocated hash transform.  The key
+ * is only applied when hmackey_size is non-zero (plain digests are
+ * keyless).  On failure the tfm is freed and an ERR_PTR returned.
+ */
+struct csession *create_session_ahash(struct crypto_ahash *tfm,
+ char *alg_name,
+ struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+
+ crypto_ahash_clear_flags(tfm, ~0);
+
+ /* Copy the key(hmac) from user and set to TFM. */
+ if (sop->hmackey_size) {
+ ret = crypto_ahash_setkey(tfm, keyp, sop->hmackey_size);
+ if (ret) {
+ /* NOTE(review): %zu assumes hmackey_size is
+ * size_t - confirm against cryptodev.h */
+ printk(KERN_ERR PFX
+ "failed to set key field for %s-%zu: "
+ "flags=0x%X\n"
+ "(see CRYPTO_TFM_RES_* in "
+ "<linux/crypto.h> for details)\n",
+ alg_name, sop->hmackey_size * 8,
+ crypto_ahash_get_flags(tfm));
+
+ ret = -EINVAL;
+ goto free_ahash;
+ }
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_ahash;
+ }
+ ses_new->tfm_ahash = tfm;
+ ses_new->mode = CRYPTO_MODE_AHASH;
+ ses_new->destroy = cryptodev_destroy_ahash_tfm;
+ ses_new->runop = cryptodev_run_ahash;
+ ses_new->getivsize = cryptodev_ahash_getivsize;
+ ses_new->setkey = cryptodev_ahash_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "AHASH sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_ahash:
+ crypto_free_ahash(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Routine for creating a session for CRYPTO block type algorithm
+ *
+ */
+/*
+ * Build a csession around an already-allocated block-cipher transform:
+ * set the key, allocate the session, and bind the cipher callbacks.
+ * On failure the tfm is freed and an ERR_PTR returned.
+ */
+struct csession *create_session_ablkcipher(struct crypto_ablkcipher *tfm,
+ char *alg_name, struct session_op *sop,
+ char *keyp)
+{
+ struct csession *ses_new;
+ int ret = 0;
+
+ crypto_ablkcipher_clear_flags(tfm, ~0);
+
+ /* Copy the key from user and set to TFM. */
+ ret = crypto_ablkcipher_setkey(tfm, keyp, sop->key_size);
+
+ if (ret) {
+ /* NOTE(review): %zu assumes sop->key_size is size_t -
+ * confirm against struct session_op in cryptodev.h */
+ printk(KERN_ERR PFX
+ "failed to set key for %s-%zu: flags=0x%X\n",
+ alg_name, sop->key_size*8,
+ crypto_ablkcipher_get_flags(tfm));
+ printk(KERN_ERR PFX
+ "(see CRYPTO_TFM_RES_* in <linux/crypto.h> for "
+ "details)\n");
+
+ ret = -EINVAL;
+ goto free_ablkcipher;
+ }
+
+ ses_new = kzalloc(sizeof(*ses_new), GFP_KERNEL);
+ if (!ses_new) {
+ ret = -ENOMEM;
+ goto free_ablkcipher;
+ }
+
+ ses_new->tfm_ablkcipher = tfm;
+ ses_new->mode = CRYPTO_MODE_ACIPHER;
+ ses_new->destroy = cryptodev_destroy_ablkcipher_tfm;
+ ses_new->runop = cryptodev_run_acipher;
+ ses_new->getivsize = cryptodev_ablkcipher_getivsize;
+ ses_new->setkey = cryptodev_ablkcipher_setkey;
+
+ atomic_set(&ses_new->refcnt, 1);
+
+ CDPRINTK(1, KERN_INFO, "ABLCKCIPHER sid %p alg %s created\n",
+ ses_new, alg_name);
+ return ses_new;
+
+free_ablkcipher:
+ crypto_free_ablkcipher(tfm);
+ return ERR_PTR(ret);
+}
+
+/**
+ * Prepare session for future use
+ *
+ */
+/*
+ * Parse the session payload (algorithm name, cipher key, hmac key laid
+ * out back to back) and create a session of the matching type.  The
+ * algorithm name is probed against each tfm type in turn: hash first,
+ * then block cipher, then AEAD.  Returns an ERR_PTR on failure.
+ */
+struct csession *cryptodev_create_session(struct session_op *sop,
+ void *session_udata)
+{
+ char *alg_name;
+ char *key;
+ char *hmac_key;
+ struct crypto_ablkcipher *ablkcipher_tfm;
+ struct crypto_aead *aead_tfm;
+ struct crypto_ahash *ahash_tfm;
+
+ /* session_udata layout: algo name | cipher key | hmac key */
+ alg_name = (char *) session_udata;
+ key = alg_name + sop->algo_size;
+ hmac_key = key + sop->key_size;
+
+ ahash_tfm = crypto_alloc_ahash(alg_name, 0, 0);
+ if (!IS_ERR(ahash_tfm))
+ return create_session_ahash(ahash_tfm, alg_name, sop,
+ hmac_key);
+ ablkcipher_tfm = crypto_alloc_ablkcipher(alg_name, 0, 0);
+ if (!IS_ERR(ablkcipher_tfm))
+ return create_session_ablkcipher(ablkcipher_tfm,
+ alg_name, sop, key);
+ aead_tfm = crypto_alloc_aead(alg_name, 0, 0);
+ if (!IS_ERR(aead_tfm))
+ return create_session_aead(aead_tfm, alg_name, sop,
+ key);
+
+ printk(KERN_ERR PFX "un-supported algorithm %s\n", alg_name);
+ /* was: ret = -EINVAL; return ERR_PTR(ret); - pointless temp */
+ return ERR_PTR(-EINVAL);
+}
+
+/**
+ * Helper Functions for Page Creation and deletion.
+ *
+ */
+/* Number of pages spanned by [data, data + bufsize); 1 for empty. */
+static int cryptodev_num_pages(unsigned long data, size_t bufsize)
+{
+	unsigned long first_pg;
+	unsigned long last_pg;
+
+	if (!bufsize)
+		return 1;
+	/* Masking with PAGE_MASK before shifting is redundant. */
+	first_pg = data >> PAGE_SHIFT;
+	last_pg = (data + bufsize - 1) >> PAGE_SHIFT;
+	return (int) (last_pg - first_pg + 1);
+}
+
+/*
+ * Map a user buffer into a scatterlist.
+ *
+ * If src is NULL, a zeroed kernel bounce buffer of bufsize is allocated
+ * (returned via *null_buf for later freeing) and mapped as one entry.
+ * Otherwise *nr_pages user pages are pinned with get_user_pages() and
+ * mapped either as a single entry (sg_single) or one entry per page.
+ * Returns 0 on success or a negative errno; on failure no pages are
+ * left pinned (the original leaked partially-pinned pages).
+ */
+static int cryptodev_set_user_pages(char __user *src, struct scatterlist *sg,
+ struct page **pages, size_t bufsize,
+ int *nr_pages, char **null_buf)
+{
+ unsigned long offset;
+ struct page *page = NULL;
+ int x;
+ int rop;
+ int err;
+
+ if (!src) {
+ *nr_pages = 0;
+ CDPRINTK(1, KERN_INFO, "Case of null buffer\n");
+ *null_buf = kzalloc(bufsize, GFP_KERNEL);
+ if (!*null_buf)
+ return -ENOMEM;
+ sg_init_one(&sg[0], *null_buf, bufsize);
+ return 0;
+ }
+
+ offset = (unsigned long) src & ~PAGE_MASK;
+ if (!pages) {
+ printk(KERN_ERR PFX "pages memory allocation failed\n");
+ return -ENOMEM;
+ }
+
+ down_read(&current->mm->mmap_sem);
+ err = get_user_pages(current, current->mm,
+ ((unsigned long) src) & PAGE_MASK,
+ *nr_pages, 1, 0, /* read, force */ pages, NULL);
+ up_read(&current->mm->mmap_sem);
+
+ if (err != *nr_pages) {
+ printk(KERN_ERR PFX "pages requested[%d] !="
+ " pages granted[%d]\n", *nr_pages, err);
+ /* release the pages that did get pinned (was leaked) */
+ if (err > 0)
+ cryptodev_release_pages(pages, err);
+ return err < 0 ? err : -EINVAL;
+
+ }
+
+ if (sg_single) {
+ page = pages[0];
+ CDPRINTK(2, KERN_INFO, "single buffer implementation\n");
+ sg_set_page(&sg[0], page, bufsize, offset);
+ return 0;
+ }
+
+ sg_init_table(sg, *nr_pages);
+ for (x = 0; x < *nr_pages; x++) {
+ page = pages[x];
+ if (!page || IS_ERR(page)) {
+ printk(KERN_ERR PFX "missing page in "
+ "DumpUserPages %d\n", x);
+ /* all *nr_pages were granted above: unpin them */
+ cryptodev_release_pages(pages, *nr_pages);
+ return -EFAULT;
+ }
+ sg_set_page(&sg[x], page, bufsize, offset);
+ rop = PAGE_SIZE - sg[x].offset;
+ if (bufsize > rop) {
+ sg[x].length = rop;
+ bufsize = bufsize - rop;
+ }
+ offset = 0;
+ }
+
+ return 0;
+}
+
+/*
+ * Scatter a contiguous kernel buffer across up to sg_num entries, one
+ * per page crossed, so each entry stays within a single page.
+ */
+static void cryptodev_sg_setbuf(unsigned char *data, size_t bufsize,
+ struct scatterlist *sg, int sg_num)
+{
+	int chunk;
+	int i;
+
+	sg_init_table(sg, sg_num);
+	for (i = 0; bufsize > 0 && i < sg_num; i++) {
+		sg_set_buf(&sg[i], data, bufsize);
+		chunk = PAGE_SIZE - sg[i].offset;
+		if (bufsize > chunk) {
+			/* buffer continues on the next page */
+			sg[i].length = chunk;
+			bufsize -= chunk;
+			data += chunk;
+		} else {
+			bufsize = 0;
+		}
+	}
+}
+
+/**
+ * Helper Functions for the AEAD mode
+ *
+ */
+/*
+ * Allocate one buffer holding, in order: the async_result, the aligned
+ * aead_request (+ tfm context), and then either (nopin) the sg entries
+ * plus a bounce buffer, or (pinned) the sg entries plus page-pointer
+ * arrays.  The aead_* helpers below recompute these offsets and must be
+ * kept in exact sync with this layout.
+ */
+static void *aead_alloc_tmp(struct crypto_aead *aead, int sg_size,
+ int ssg_num, int nopin, size_t bufsize)
+{
+ unsigned int len;
+
+ len = sizeof(struct async_result) +
+ (crypto_aead_alignmask(aead) &
+ ~(crypto_tfm_ctx_alignment() - 1));
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+
+ len += sizeof(struct aead_request) + crypto_aead_reqsize(aead);
+
+ if (nopin) {
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * ssg_num;
+ len += bufsize;
+
+ return kzalloc(len, GFP_KERNEL);
+ }
+
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * (sg_size);
+
+ len = ALIGN(len, __alignof__(struct page *));
+ len += sizeof(struct page *) * sg_size;
+
+ return kzalloc(len, GFP_KERNEL);
+}
+
+/* aead_request lives just past the async_result, ctx-aligned. */
+static inline struct aead_request *aead_result_req(
+ struct crypto_aead *aead,
+ struct async_result *result)
+{
+ struct aead_request *req;
+
+ /* NOTE(review): PTR_ALIGN applied to an integer expression -
+ * confirm this compiles warning-free on the target tree */
+ req = (struct aead_request *) PTR_ALIGN(
+ (unsigned long) result +
+ sizeof(struct async_result),
+ crypto_tfm_ctx_alignment());
+ aead_request_set_tfm(req, aead);
+ return req;
+}
+
+/* Source scatterlist follows the request and its tfm context. */
+static inline struct scatterlist *aead_req_ssg(struct crypto_aead *aead,
+ struct aead_request *req)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) (req + 1) +
+ crypto_aead_reqsize(aead),
+ __alignof__(struct scatterlist));
+}
+
+/* nopin layout: bounce data sits right after the ssg_num entries. */
+static inline char *aead_ssg_data(struct scatterlist *ssg, int ssg_num)
+{
+ return (char *) ((unsigned long) ssg + sizeof(struct scatterlist)
+ * ssg_num);
+}
+
+/* pinned layout: source page array follows the source sg entries. */
+static inline struct page **aead_ssg_spages(struct scatterlist *sg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) sg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+/* dst sg entries follow the source page array (src != dst case). */
+static inline struct scatterlist *aead_spages_dsg(struct page **pages,
+ int npages)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) pages +
+ sizeof(struct page *) * npages,
+ __alignof__(struct scatterlist));
+}
+
+/* dst page array follows the dst sg entries. */
+static inline struct page **aead_dsg_dpages(struct scatterlist *dsg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) dsg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+/* True when the buffer should be copied instead of page-pinned
+ * (too small to be worth pinning, or too large - see module params). */
+static inline int cryptodev_is_pin_pages(int size)
+{
+	return size <= page_pin_min_size || size >= page_pin_max_size;
+}
+
+/**
+ * This is the actual aead function that implements
+ * the Combined mode
+ *
+ */
+/*
+ * Run one AEAD (combined encrypt+authenticate / decrypt+verify) request.
+ *
+ * Small/huge buffers (nopin) are bounce-copied; otherwise the user pages
+ * are pinned and scattered directly.  With iocb != NULL the request is
+ * asynchronous and cleanup happens in cryptodev_async_aio_complete();
+ * otherwise opcnt (2) is shared with cryptodev_sync_complete() so that
+ * the last dropper frees the pinned pages and the result.
+ */
+static int cryptodev_run_aead(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb)
+{
+ void *tmp;
+ char __user *src;
+ char __user *dst;
+ char *data = NULL;
+ struct scatterlist *ssg;
+ struct scatterlist *dsg;
+ struct aead_request *req;
+ struct async_result *result = NULL;
+ size_t bufsize, authsize;
+ int nr_spages, nr_dpages = 0;
+ int ssg_num;
+ int enc, ret, dst_flag, nopin; /*Flags*/
+ struct scatterlist adata;
+
+ /* Setting the Input params */
+ bufsize = cop->src_size;
+ src = cop->src_data;
+ dst = cop->dst_data;
+ dst_flag = src != dst;
+ enc = cop->eop == COP_ENCRYPT ? 1 : 0;
+ authsize = crypto_aead_authsize(ses_ptr->tfm_aead);
+
+ ssg_num = cryptodev_num_pages((unsigned long) src,
+ enc ? bufsize + authsize : bufsize);
+
+ if (cop->eop && cop->eop != COP_ENCRYPT && cop->eop != COP_DECRYPT) {
+ printk(KERN_ERR PFX "sid %p invalid operation op=%u\n",
+ ses_ptr, cop->eop);
+ return -EINVAL;
+ }
+
+ if (bufsize > CRYPTO_MAX_DATA_LEN) {
+ /* bufsize is size_t: %zu (was %d, a format mismatch) */
+ printk(KERN_INFO PFX "Maximum Data Size Exceeded: %zu > %d\n",
+ bufsize, CRYPTO_MAX_DATA_LEN);
+ return -E2BIG;
+ }
+
+ /* Flag set No Pinning pages, Size too large or too small*/
+ nopin = cryptodev_is_pin_pages(bufsize) ? 1 : 0;
+
+ if (dst_flag) {
+ if (nopin) {
+ nr_spages = nr_dpages = 0;
+ } else {
+ nr_spages = cryptodev_num_pages((unsigned long) src,
+ bufsize);
+ nr_dpages = cryptodev_num_pages((unsigned long) dst,
+ enc ? bufsize + authsize :
+ bufsize - authsize);
+ }
+ } else {
+ if (nopin)
+ nr_spages = 0;
+ else
+ nr_spages = cryptodev_num_pages((unsigned long) src,
+ enc ? bufsize + authsize :
+ bufsize);
+ }
+
+ tmp = aead_alloc_tmp(ses_ptr->tfm_aead,
+ dst_flag ? nr_spages + nr_dpages : nr_spages,
+ ssg_num, nopin, bufsize);
+ if (!tmp)
+ return -ENOMEM;
+
+ result = (struct async_result *) tmp;
+ req = aead_result_req(ses_ptr->tfm_aead, result);
+ ssg = aead_req_ssg(ses_ptr->tfm_aead, req);
+
+ if (nopin) {
+ data = aead_ssg_data(ssg, ssg_num);
+ if (src && copy_from_user(data, src, bufsize)) {
+ printk(KERN_ERR PFX
+ "failed to copy aead "
+ "cop data from user space\n");
+ kfree(tmp);
+ return -EINVAL;
+ }
+ cryptodev_sg_setbuf(data, enc ? bufsize + authsize : bufsize,
+ ssg, ssg_num);
+ dsg = ssg;
+ } else {
+ result->spages = aead_ssg_spages(ssg, nr_spages);
+
+ if (dst_flag) {
+ dsg = aead_spages_dsg(result->spages, nr_spages);
+ result->dpages = aead_dsg_dpages(dsg, nr_dpages);
+ ret = cryptodev_set_user_pages(src, ssg,
+ result->spages, bufsize,
+ &nr_spages,
+ &result->null_buf);
+
+ if (ret)
+ goto out_tmp;
+ ret = cryptodev_set_user_pages(dst, dsg,
+ result->dpages,
+ enc ? bufsize + authsize :
+ bufsize - authsize,
+ &nr_dpages,
+ &result->null_buf);
+ if (ret)
+ goto out_spages;
+ } else {
+ dsg = ssg;
+ result->dpages = result->spages;
+ ret = cryptodev_set_user_pages(src, ssg,
+ result->spages,
+ enc ? bufsize + authsize :
+ bufsize,
+ &nr_spages,
+ &result->null_buf);
+ if (ret)
+ goto out_tmp;
+ }
+ }
+
+ /* Record the page counts for whichever side ends up doing the
+ * cleanup.  The original set these only on the AIO path, so a
+ * synchronous caller interrupted in the wait below left
+ * cryptodev_sync_complete() releasing zero pages - a pinned
+ * page leak. */
+ result->nr_spages = nr_spages;
+ result->nr_dpages = nr_dpages;
+
+ if (iocb) {
+ result->iocbvec = iocb;
+ result->nopin_data = data;
+ result->session_ptr = ses_ptr;
+ result->udata = (void *)cop->udata;
+ result->aio_enc = cop->eop;
+ result->aio_dst_data = dst;
+ result->aio_size_data = enc ? bufsize + authsize :
+ bufsize - authsize;
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_async_aio_complete,
+ result);
+ } else {
+ atomic_set(&result->opcnt, 2);
+ init_completion(&(result->crypto_completion));
+ aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_sync_complete,
+ result);
+ }
+
+ /* Additional Associated data */
+ sg_init_one(&adata, cop->assoc, cop->assoc_len);
+
+ aead_request_set_crypt(req, ssg, dsg, bufsize, cop->iv);
+ aead_request_set_assoc(req, &adata, cop->assoc_len);
+
+ atomic_inc(&ses_ptr->refcnt);
+
+ if (cop->eop == COP_ENCRYPT)
+ ret = crypto_aead_encrypt(req);
+ else
+ ret = crypto_aead_decrypt(req);
+
+ switch (ret) {
+ case 0:
+ /* completed synchronously: the callback never runs */
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ if (iocb) {
+ CDPRINTK(2, KERN_INFO,
+ "Async Call AEAD:Returning Now\n");
+ return -EIOCBQUEUED;
+ }
+ ret = wait_for_completion_interruptible(
+ &result->crypto_completion);
+ if (!ret)
+ ret = result->err;
+ if (!ret) {
+ INIT_COMPLETION(result->crypto_completion);
+ break;
+ }
+ /* interrupted: the callback still owns its opcnt ref
+ * and will free result/pages when it fires */
+ printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+ ses_ptr, -ret);
+ break;
+ default:
+ printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+ ses_ptr, -ret);
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ break;
+ }
+
+ if (nopin && !ret) {
+ if (copy_to_user(dst, data, enc ? bufsize + authsize :
+ bufsize - authsize))
+ printk(KERN_ERR PFX
+ "failed to copy encrypted data "
+ "to user space\n");
+ CD_HEXDUMP(data, enc ? bufsize + authsize :
+ bufsize - authsize);
+ }
+
+ /* Check if last reference */
+ if (atomic_dec_and_test(&ses_ptr->refcnt))
+ cryptodev_destroy_session(ses_ptr);
+ if (!iocb) {
+ if (atomic_dec_and_test(&result->opcnt))
+ goto out_dpages; /* cleanup */
+ else
+ return ret;
+ }
+out_dpages:
+ if (dst_flag)
+ cryptodev_release_pages(result->dpages, nr_dpages);
+out_spages:
+ cryptodev_release_pages(result->spages, nr_spages);
+out_tmp:
+ cryptodev_destroy_res(result);
+ return ret;
+}
+
+/**
+ * Helper Functions for the Hash mode
+ *
+ */
+/*
+ * Allocate one buffer holding, in order: the async_result, a 64-byte
+ * digest area, the aligned ahash_request (+ tfm context), then either
+ * (nopin) sg entries plus a bounce buffer or (pinned) sg entries plus a
+ * page-pointer array.  The ahash_* helpers below recompute these
+ * offsets and must be kept in exact sync.
+ */
+static void *ahash_alloc_tmp(struct crypto_ahash *ahash, int sg_size,
+ size_t bufsize, int nopin,
+ int sg_num)
+{
+ unsigned int len;
+
+ len = sizeof(struct async_result);
+ /* NOTE(review): 64-byte digest area assumes no digest larger
+ * than SHA-512 - confirm against supported algorithms */
+ len += sizeof(char) * 64;
+
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+ len += sizeof(struct ahash_request) + crypto_ahash_reqsize(ahash);
+
+ if (nopin) {
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * sg_num;
+ len += bufsize;
+
+ return kzalloc(len, GFP_KERNEL);
+ }
+
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * sg_size;
+
+ len = ALIGN(len, __alignof__(struct page *));
+ len += sizeof(struct page *) * sg_size;
+
+ return kzalloc(len, GFP_KERNEL);
+}
+
+/* The request sits just past the 64-byte digest area, ctx-aligned. */
+static inline struct ahash_request *ahash_digest_req(
+ struct crypto_ahash *ahash,
+ char *digest)
+{
+ struct ahash_request *req;
+
+ req = (struct ahash_request *) PTR_ALIGN((digest + sizeof(char) * 64),
+ crypto_tfm_ctx_alignment());
+ ahash_request_set_tfm(req, ahash);
+ return req;
+
+}
+
+/* Source scatterlist follows the request and its tfm context. */
+static inline struct scatterlist *ahash_req_sg(struct crypto_ahash *ahash,
+ struct ahash_request *req)
+{
+ return (struct scatterlist *) ALIGN((unsigned long)(req + 1) +
+ crypto_ahash_reqsize(ahash),
+ __alignof__(struct scatterlist));
+}
+
+/* nopin layout: bounce data sits right after the ssg_num entries. */
+static inline char *ahash_ssg_data(struct scatterlist *ssg, int ssg_num)
+{
+ return (char *) ((unsigned long) ssg + sizeof(struct scatterlist)
+ * ssg_num);
+}
+
+/* pinned layout: the page array follows the sg entries. */
+static inline struct page **ahash_sg_pages(struct scatterlist *sg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long)sg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+/**
+ * This is the actual hash function that creates the
+ * authenticated data
+ *
+ */
+/*
+ * Run one hash/HMAC request and copy the digest back to user space.
+ *
+ * Small/huge buffers (nopin) are bounce-copied; otherwise user pages
+ * are pinned.  With iocb != NULL the request completes through
+ * cryptodev_async_aio_complete(); otherwise opcnt (2) is shared with
+ * cryptodev_sync_complete() so the last dropper does the cleanup.
+ */
+static int cryptodev_run_ahash(struct csession *ses_ptr,
+ struct crypto_item_op *cop,
+ struct kiocb *iocb)
+{
+ char __user *src;
+ char __user *mac;
+ struct scatterlist *ssg;
+ struct ahash_request *req;
+ struct async_result *result = NULL;
+ size_t authsize;
+ size_t bufsize;
+ int ret;
+ char *digest;
+ char *data = NULL;
+ void *tmp;
+ int nr_spages;
+ int nopin;
+ int ssg_num;
+
+ bufsize = cop->src_size;
+ src = cop->src_data;
+ mac = cop->dst_data;
+ ssg_num = cryptodev_num_pages((unsigned long) src, bufsize);
+
+ /* Checking the Input Length */
+ if (bufsize > CRYPTO_MAX_DATA_LEN) {
+ /* bufsize is size_t: %zu (was %d, a format mismatch) */
+ printk(KERN_INFO PFX "Maximum Data Size Exceeded: %zu > %d\n",
+ bufsize, CRYPTO_MAX_DATA_LEN);
+ return -E2BIG;
+ }
+
+ /* Flag set No Pinning pages, Size too large or too small*/
+ nopin = cryptodev_is_pin_pages(bufsize) ? 1 : 0;
+
+ nr_spages = nopin ? 0 :
+ cryptodev_num_pages((unsigned long) src, bufsize);
+ authsize = crypto_ahash_digestsize(ses_ptr->tfm_ahash);
+
+ tmp = ahash_alloc_tmp(ses_ptr->tfm_ahash, nr_spages,
+ bufsize, nopin, ssg_num);
+ if (!tmp)
+ return -ENOMEM;
+
+ /* Setting the request, Digest, and sg */
+ result = (struct async_result *) tmp;
+ digest = (char *) ((unsigned long) result +
+ sizeof(struct async_result));
+ req = ahash_digest_req(ses_ptr->tfm_ahash, digest);
+ ssg = ahash_req_sg(ses_ptr->tfm_ahash, req);
+ if (nopin) {
+ data = ahash_ssg_data(ssg, ssg_num);
+ if (src && copy_from_user(data, src, bufsize)) {
+ printk(KERN_ERR PFX
+ "failed to copy hash data from user space\n");
+ kfree(tmp);
+ return -EINVAL;
+ }
+ cryptodev_sg_setbuf(data, bufsize, ssg, ssg_num);
+ } else {
+ result->spages = ahash_sg_pages(ssg, nr_spages);
+
+ ret = cryptodev_set_user_pages(src, ssg, result->spages,
+ bufsize, &nr_spages,
+ &result->null_buf);
+ if (ret)
+ goto out_tmp;
+ }
+
+ /* Record the page counts for whichever side performs cleanup.
+ * The original set these only on the AIO path, so a synchronous
+ * caller interrupted in the wait below leaked pinned pages when
+ * cryptodev_sync_complete() released zero pages. */
+ result->nr_spages = nr_spages;
+ result->nr_dpages = 0;
+
+ if (iocb) {
+ result->iocbvec = iocb;
+ result->session_ptr = ses_ptr;
+ result->aio_dst_data = mac;
+ result->udata = (void *)cop->udata;
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_async_aio_complete,
+ result);
+ } else {
+ atomic_set(&result->opcnt, 2);
+ init_completion(&(result->crypto_completion));
+ ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ cryptodev_sync_complete, result);
+ }
+
+ ahash_request_set_crypt(req, ssg, digest, bufsize);
+
+ atomic_inc(&ses_ptr->refcnt);
+
+ ret = crypto_ahash_digest(req);
+ switch (ret) {
+ case 0:
+ /* completed synchronously: the callback never runs */
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ break;
+ case -EINPROGRESS:
+ case -EBUSY:
+ if (iocb) {
+ CDPRINTK(2, KERN_INFO,
+ "Async Call AHASH:Returning Now\n");
+ /* NOTE(review): the AEAD path returns
+ * -EIOCBQUEUED here - confirm which of the two
+ * return codes the aio layer expects */
+ return -EIOCBRETRY;
+ }
+ ret = wait_for_completion_interruptible(
+ &result->crypto_completion);
+ if (!ret)
+ ret = result->err;
+ if (!ret) {
+ INIT_COMPLETION(result->crypto_completion);
+ break;
+ }
+ /* interrupted: the callback still owns its opcnt ref */
+ printk(KERN_ERR PFX "sid %p hash failed error %d\n",
+ ses_ptr, -ret);
+ break;
+ default:
+ if (!iocb)
+ atomic_dec(&result->opcnt);
+ printk(KERN_ERR PFX "sid %p digest failed error %d\n",
+ ses_ptr, -ret);
+ break;
+ }
+
+ if (!ret) {
+ CD_HEXDUMP(digest, authsize);
+ if (copy_to_user(mac, digest, authsize)) {
+ printk(KERN_ERR PFX "sid %p failed to copy mac data to"
+ "user space for hash\n", ses_ptr);
+ ret = -EFAULT;
+ }
+ }
+
+ /* Check if last reference */
+ if (atomic_dec_and_test(&ses_ptr->refcnt))
+ cryptodev_destroy_session(ses_ptr);
+ if (!iocb) {
+ if (atomic_dec_and_test(&result->opcnt))
+ goto out_pages; /* cleanup */
+ else
+ return ret;
+ }
+out_pages:
+ cryptodev_release_pages(result->spages, nr_spages);
+out_tmp:
+ cryptodev_destroy_res(result);
+ return ret;
+}
+
+/**
+ * Helper Functions for the Cipher mode
+ *
+ */
+static void *ablkcipher_alloc_tmp(struct crypto_ablkcipher *ablkcipher,
+ int sg_size, int nopin,
+ size_t bufsize, int ssg_num)
+{
+ unsigned int len;
+
+ len = sizeof(struct async_result) +
+ (crypto_ablkcipher_alignmask(ablkcipher) &
+ ~(crypto_tfm_ctx_alignment() - 1));
+ len = ALIGN(len, crypto_tfm_ctx_alignment());
+
+ len += sizeof(struct ablkcipher_request) +
+ crypto_ablkcipher_reqsize(ablkcipher);
+ if (nopin) {
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * ssg_num;
+ len += bufsize;
+
+ return kzalloc(len, GFP_KERNEL);
+ }
+
+ len = ALIGN(len, __alignof__(struct scatterlist));
+ len += sizeof(struct scatterlist) * sg_size;
+
+ len = ALIGN(len, __alignof__(struct page *));
+ len += sizeof(struct page *) * sg_size;
+
+ return kzalloc(len, GFP_KERNEL);
+}
+
+static inline struct ablkcipher_request *ablkcipher_result_req
+ (struct crypto_ablkcipher
+ *ablkcipher,
+ struct async_result
+ *result)
+{
+ struct ablkcipher_request *req;
+
+ req = (struct ablkcipher_request *) PTR_ALIGN(
+ (unsigned long) result +
+ sizeof(struct async_result),
+ crypto_tfm_ctx_alignment());
+ ablkcipher_request_set_tfm(req, ablkcipher);
+ return req;
+}
+
+static inline struct scatterlist *ablkcipher_req_sg(
+ struct crypto_ablkcipher *ablkcipher,
+ struct ablkcipher_request *req)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) (req + 1) +
+ crypto_ablkcipher_reqsize(ablkcipher),
+ __alignof__(struct scatterlist));
+}
+
+static inline char *ablkcipher_ssg_data(struct scatterlist *ssg,
+ int ssg_num)
+{
+ return (char *) ((unsigned long) ssg +
+ sizeof(struct scatterlist) * ssg_num);
+}
+
+/*
+ * Return the source page-pointer array placed after the sg_size
+ * scatterlist entries in the combined allocation (pinned-pages mode).
+ */
+static inline struct page **ablkcipher_ssg_spages(struct scatterlist *ssg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) ssg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+/*
+ * Return the destination scatterlist placed after the source
+ * page-pointer array (used only for out-of-place, pinned-pages
+ * operations).
+ */
+static inline struct scatterlist *ablkcipher_spages_dsg
+ (struct page **pages, int len)
+{
+ return (struct scatterlist *) ALIGN((unsigned long) pages +
+ sizeof(struct page *) * len,
+ __alignof__(struct scatterlist));
+}
+
+/*
+ * Return the destination page-pointer array placed after the
+ * destination scatterlist entries (out-of-place, pinned-pages mode).
+ */
+static inline struct page **ablkcipher_dsg_dpages(struct scatterlist *dsg,
+ int sg_size)
+{
+ return (struct page **) ALIGN((unsigned long) dsg +
+ sizeof(struct scatterlist) * sg_size,
+ __alignof__(struct page *));
+}
+
+/**
+ * cryptodev_run_acipher - perform one encrypt/decrypt operation
+ * @ses_ptr: session owning the ablkcipher transform
+ * @cop: operation descriptor (op code, IV, src/dst user pointers, size)
+ * @iocb: AIO control block, or NULL for a synchronous call
+ *
+ * Small requests (nopin) are bounced through a kernel buffer; larger
+ * ones pin the user pages and build scatterlists over them directly.
+ *
+ * Returns 0 on success, -EIOCBRETRY/-EIOCBQUEUED when an AIO request
+ * has been queued, or a negative error code.
+ */
+static int cryptodev_run_acipher(struct csession *ses_ptr,
+				 struct crypto_item_op *cop,
+				 struct kiocb *iocb)
+{
+	char __user *src;
+	char __user *dst;
+	void *tmp;
+	struct scatterlist *ssg;
+	struct scatterlist *dsg;
+	char *data = NULL;
+	struct ablkcipher_request *req;
+	struct async_result *result = NULL;
+	size_t bufsize;
+	int ret = 0;
+	int nr_spages;
+	int nr_dpages = 0;
+	int dst_flag, nopin;
+	int ssg_num;
+
+	/* Setting the Input params */
+	bufsize = cop->src_size;
+	src = cop->src_data;
+	dst = cop->dst_data;
+	dst_flag = src != dst;		/* out-of-place operation? */
+	ssg_num = cryptodev_num_pages((unsigned long) src, bufsize);
+
+	/* nopin != 0: small request, bounce through a kernel buffer
+	 * instead of pinning user pages. */
+	nopin = cryptodev_is_pin_pages(bufsize) ? 1 : 0;
+
+	if (cop->eop && cop->eop != COP_ENCRYPT && cop->eop != COP_DECRYPT) {
+		printk(KERN_ERR PFX "sid %p invalid operation op=%u\n",
+		       ses_ptr, cop->eop);
+		return -EINVAL;
+	}
+
+	if (bufsize > CRYPTO_MAX_DATA_LEN) {
+		/* %zu: bufsize is a size_t (was incorrectly %d) */
+		printk(KERN_INFO PFX "Maximum Data Size Exceeded: %zu > %d\n",
+		       bufsize, CRYPTO_MAX_DATA_LEN);
+		return -E2BIG;
+	}
+
+	if (bufsize % crypto_ablkcipher_blocksize(ses_ptr->tfm_ablkcipher)) {
+		printk(KERN_ERR PFX
+		       "data size (%zu) isn't a multiple of block size (%u)\n",
+		       bufsize,
+		       crypto_ablkcipher_blocksize(ses_ptr->tfm_ablkcipher));
+		return -EINVAL;
+	}
+
+	nr_spages = nopin ? 0 :
+		    cryptodev_num_pages((unsigned long) src, bufsize);
+	if (dst_flag)
+		nr_dpages = nopin ? 0 : cryptodev_num_pages(
+					(unsigned long) dst, bufsize);
+
+	/* One allocation holds the async_result, the request and either
+	 * the bounce buffer (nopin) or the page-pointer arrays. */
+	tmp = ablkcipher_alloc_tmp(ses_ptr->tfm_ablkcipher,
+				   dst_flag ? (nr_spages + nr_dpages) :
+				   nr_spages, nopin, bufsize, ssg_num);
+	if (!tmp)
+		return -ENOMEM;
+
+	/* Setting the request, Digest, and sg */
+	result = (struct async_result *) tmp;
+	req = ablkcipher_result_req(ses_ptr->tfm_ablkcipher, result);
+	ssg = ablkcipher_req_sg(ses_ptr->tfm_ablkcipher, req);
+	if (nopin) {
+		data = ablkcipher_ssg_data(ssg, ssg_num);
+		if (src && copy_from_user(data, src, bufsize)) {
+			printk(KERN_ERR PFX
+			       "failed to copy cop cipher "
+			       "data from user space\n");
+			kfree(tmp);
+			/* -EFAULT (was -EINVAL): failed user copy,
+			 * matching every other copy_*_user path here. */
+			return -EFAULT;
+		}
+		CD_HEXDUMP(data, bufsize);
+		cryptodev_sg_setbuf(data, bufsize, ssg, ssg_num);
+
+		/* Operate in place on the bounce buffer. */
+		dsg = ssg;
+	} else {
+		result->spages = ablkcipher_ssg_spages(ssg, nr_spages);
+		ret = cryptodev_set_user_pages(src, ssg, result->spages,
+					       bufsize, &nr_spages,
+					       &result->null_buf);
+		if (ret)
+			goto out_tmp;
+		if (dst_flag) {
+			dsg = ablkcipher_spages_dsg(result->spages, nr_spages);
+			result->dpages = ablkcipher_dsg_dpages(dsg, nr_dpages);
+			ret = cryptodev_set_user_pages(dst, dsg,
+				result->dpages, bufsize, &nr_dpages,
+				&result->null_buf);
+			if (ret)
+				goto out_spages;
+		} else {
+			dsg = ssg;
+			result->dpages = result->spages;
+		}
+	}
+
+	if (iocb) {
+		/* AIO: the completion callback owns the cleanup. */
+		result->iocbvec = iocb;
+		result->nr_spages = nr_spages;
+		result->nr_dpages = nr_dpages;
+		result->nopin_data = data;
+		result->session_ptr = ses_ptr;
+		result->udata = cop->udata;
+		result->aio_enc = cop->eop;
+		result->aio_dst_data = dst;
+		result->aio_size_data = bufsize;
+		ablkcipher_request_set_callback(req,
+						CRYPTO_TFM_REQ_MAY_BACKLOG,
+						cryptodev_async_aio_complete,
+						result);
+	} else {
+		/* Sync: two references (caller + completion callback) so
+		 * an interrupted wait cannot free data the crypto code
+		 * may still touch. */
+		atomic_set(&result->opcnt, 2);
+		init_completion(&(result->crypto_completion));
+		ablkcipher_request_set_callback(req,
+						CRYPTO_TFM_REQ_MAY_BACKLOG,
+						cryptodev_sync_complete,
+						result);
+	}
+	ablkcipher_request_set_crypt(req, ssg, dsg, bufsize, cop->iv);
+
+	/* Hold the session while the request is in flight. */
+	atomic_inc(&ses_ptr->refcnt);
+	if (cop->eop == COP_ENCRYPT)
+		ret = crypto_ablkcipher_encrypt(req);
+	else
+		ret = crypto_ablkcipher_decrypt(req);
+
+	switch (ret) {
+	case 0:
+		/* Completed synchronously: the callback never ran, so
+		 * drop its reference here. */
+		if (!iocb)
+			atomic_dec(&result->opcnt);
+		break;
+	case -EINPROGRESS:
+	case -EBUSY:
+		if (iocb) {
+			CDPRINTK(2, KERN_INFO,
+				 "Async Call ACRYPTO:Returning Now\n");
+			if (nopin)
+				return -EIOCBRETRY;
+			else
+				return -EIOCBQUEUED;
+		}
+		ret = wait_for_completion_interruptible(
+			&(result->crypto_completion));
+		if (!ret)
+			ret = result->err;
+		if (!ret) {
+			INIT_COMPLETION(result->crypto_completion);
+			break;
+		}
+		/* Interrupted: the callback still holds its reference
+		 * and frees the result when it eventually runs. */
+		printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+		       ses_ptr, -ret);
+		break;
+	default:
+		if (!iocb)
+			atomic_dec(&result->opcnt);
+		printk(KERN_ERR PFX "sid %p enc/dec failed error %d\n",
+		       ses_ptr, -ret);
+		break;
+	}
+
+	if (nopin && !ret) {
+		if (copy_to_user(dst, data, bufsize))
+			printk(KERN_ERR PFX
+			       "failed to copy encrypted data"
+			       " to user space\n");
+		CD_HEXDUMP(data, bufsize);
+	}
+
+	/* Check if last reference */
+	if (atomic_dec_and_test(&ses_ptr->refcnt))
+		cryptodev_destroy_session(ses_ptr);
+	if (!iocb) {
+		if (atomic_dec_and_test(&result->opcnt))
+			goto out_dpages;/* cleanup */
+		else
+			return ret;
+	}
+out_dpages:
+	if (dst_flag)
+		cryptodev_release_pages(result->dpages, nr_dpages);
+out_spages:
+	cryptodev_release_pages(result->spages, nr_spages);
+out_tmp:
+	cryptodev_destroy_res(result);
+	return ret;
+}
+
+/**
+ * Asynchronous Function Support
+ *
+ * Completion callback for AIO requests.  Releases the pinned source
+ * pages, then either completes the iocb directly (zero-copy cipher
+ * path) or kicks it so cryptodev_aio_write_retry() can copy results
+ * back to user space.
+ */
+static void cryptodev_async_aio_complete(struct crypto_async_request *req,
+					 int err)
+{
+	struct async_result *res;
+	struct csession *ses_ptr;
+	struct kiocb *iocb;
+	int err2 = 0;
+	int done = 1;
+
+	res = req->data;
+	/* Test res before touching it: the previous code dereferenced
+	 * res->iocbvec and res->err ahead of this NULL check. */
+	if (!res)
+		return;
+
+	iocb = res->iocbvec;
+	res->err = err;
+
+	if (err == -EINPROGRESS)
+		return;
+
+	CDPRINTK(2, KERN_INFO, "Asynchronous call-back Called\n");
+
+	if (res->spages)
+		cryptodev_release_pages(res->spages, res->nr_spages);
+
+	ses_ptr = res->session_ptr;
+	err2 = iocb->ki_nbytes;
+
+	switch (ses_ptr->mode) {
+	case CRYPTO_MODE_ACIPHER:
+	case CRYPTO_MODE_AEAD:
+		if (res->dpages) {
+			if (res->dpages != res->spages)
+				cryptodev_release_pages(res->dpages,
+							res->nr_dpages);
+			aio_complete(res->iocbvec, err2, err);
+			/* No need to copy anything to user
+			   since, using Direct I/O */
+		} else {
+			/* Bounce-buffer path: the retry handler copies
+			 * the result back to user space. */
+			done = 0;
+			iocb->private = res;
+			kick_iocb(iocb);
+		}
+		break;
+	case CRYPTO_MODE_AHASH:
+		done = 0;
+		iocb->private = res;
+		kick_iocb(iocb);
+		break;
+	}
+
+	if (done) {
+		/* Fully finished here; drop the session reference taken
+		 * when the request was submitted and free the result. */
+		if (atomic_dec_and_test(&ses_ptr->refcnt))
+			cryptodev_destroy_session(ses_ptr);
+		cryptodev_destroy_res(res);
+	}
+}
+
+/**
+ * cryptodev_aio_write_retry - AIO retry handler
+ * @iocb: control block whose ->private holds our async_result
+ *
+ * Runs after the completion callback kicked the iocb: copies the
+ * result (cipher text or digest) back to user space, then drops the
+ * session reference and frees the per-request resources.
+ */
+static int cryptodev_aio_write_retry(struct kiocb *iocb)
+{
+	struct async_result *res = iocb->private;
+	struct csession *ses_ptr;
+	int ret;
+	char *digest;
+	int size;
+
+	ses_ptr = res->session_ptr;
+	ret = res->err;
+
+	if (ret == -EINPROGRESS)
+		return -EINPROGRESS;
+
+	switch (ses_ptr->mode) {
+	case CRYPTO_MODE_ACIPHER:
+	case CRYPTO_MODE_AEAD:
+		size = res->aio_size_data;
+		if (copy_to_user(res->aio_dst_data, res->nopin_data, size)) {
+			printk(KERN_ERR PFX
+			       "failed to copy encrypted data "
+			       "to user space\n");
+			ret = -EFAULT;
+		}
+		break;
+	case CRYPTO_MODE_AHASH:
+		/* The hash path stores the digest directly behind the
+		 * async_result header. */
+		digest = (char *) ((unsigned long) res +
+				   sizeof(struct async_result));
+		size = crypto_ahash_digestsize(ses_ptr->tfm_ahash);
+
+		if (copy_to_user(res->aio_dst_data, digest, size)) {
+			/* Message fixed: the split literals previously
+			 * concatenated to "...mac data touser space". */
+			printk(KERN_ERR PFX
+			       "sid %p failed to copy mac data to "
+			       "user space for hash\n", ses_ptr);
+			ret = -EFAULT;
+		}
+		break;
+	}
+
+	if (atomic_dec_and_test(&ses_ptr->refcnt))
+		cryptodev_destroy_session(ses_ptr);
+	cryptodev_destroy_res(res);
+
+	return ret;
+}
+
+/**
+ * Helper Functions for File Descriptor setting and releasing
+ *
+ */
+/* Allocate and initialise the per-fd context on open(2). */
+static int cryptodev_open(struct inode *inode, struct file *filp)
+{
+	struct cryptodev_ctx *ctx;
+
+	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+	if (ctx == NULL)
+		return -ENOMEM;
+
+	mutex_init(&ctx->lock);
+	ctx->session = NULL;	/* kzalloc zeroed it; keep it explicit */
+	filp->private_data = ctx;
+	return 0;
+}
+
+/*
+ * Release the per-fd context on close: drop this fd's reference on its
+ * session (the session itself is destroyed only once in-flight
+ * requests have dropped theirs too) and free the context.
+ */
+static int cryptodev_release(struct inode *inode, struct file *filp)
+{
+ struct cryptodev_ctx *ctx = filp->private_data;
+ struct csession *ses_ptr;
+
+ if (!ctx)
+ return 0;
+
+ mutex_lock(&ctx->lock);
+ ses_ptr = ctx->session;
+
+ if (!ses_ptr)
+ goto out;
+ /* Destroy only on the last reference; pending AIO requests also
+ * hold references (see cryptodev_run_acipher). */
+ if (atomic_dec_and_test(&ses_ptr->refcnt))
+ cryptodev_destroy_session(ses_ptr);
+
+out:
+ filp->private_data = NULL;
+ mutex_unlock(&ctx->lock);
+ kfree(ctx);
+
+ return 0;
+}
+
+/*
+ * CIOCGSESSION helper: copy the session_op header plus its trailing
+ * variable-length data from user space and create a kernel session.
+ *
+ * Returns the new session, or an ERR_PTR() value on failure.
+ */
+static struct csession *cryptodev_user_create_session(void *arg)
+{
+	struct session_op sop;
+	void *session_udata;
+	int size;
+	struct csession *ses;
+
+	if (copy_from_user(&sop, (void *) arg, sizeof(sop))) {
+		printk(KERN_ERR PFX "copy of session data failed\n");
+		return ERR_PTR(-EFAULT);
+	}
+
+	/* All four sizes are __u16, so size is bounded by 4 * 64K. */
+	size = sop.algo_size + sop.hmackey_size + sop.key_size + sop.icv_size;
+	session_udata = kzalloc(size, GFP_KERNEL);
+	if (!session_udata)
+		return ERR_PTR(-ENOMEM);
+
+	if (copy_from_user(session_udata, (void *) arg + sizeof(sop), size)) {
+		printk(KERN_ERR PFX "failed to copy sop data\n");
+		/* -EFAULT (was -EINVAL): failed user copy, matching the
+		 * convention used elsewhere in this file. */
+		ses = ERR_PTR(-EFAULT);
+		goto out_free;
+	}
+
+	/* Return the creator's result (session or ERR_PTR) directly
+	 * instead of round-tripping a valid pointer through
+	 * PTR_ERR()/ERR_PTR() as the old code did. */
+	ses = cryptodev_create_session(&sop, session_udata);
+
+out_free:
+	kfree(session_udata);
+	return ses;
+}
+
+/*
+ * CIOCKEY helper: copy the key_op header and key material from user
+ * space, then hand the key to the session's setkey hook.
+ */
+static int crypto_dev_user_setkey(struct csession *ses_ptr, void *arg)
+{
+	struct key_op kop;
+	char *key;
+	int rc;
+
+	if (copy_from_user(&kop, (void *) arg, sizeof(kop))) {
+		printk(KERN_ERR PFX "Copy of key data failed"
+		       "at CIOCKEY from user space\n");
+		return -EFAULT;
+	}
+
+	key = kzalloc(kop.ksize, GFP_KERNEL);
+	if (key == NULL)
+		return -ENOMEM;
+
+	rc = -EFAULT;
+	if (copy_from_user(key, (void *) arg + sizeof(kop), kop.ksize)) {
+		printk(KERN_ERR PFX "copy of key data failed\n");
+		goto out_free;
+	}
+
+	rc = ses_ptr->setkey(ses_ptr, key, kop.ksize);
+
+out_free:
+	kfree(key);
+	return rc;
+}
+
+/*
+ * Parse one user operation: iov[0] carries the per-op header (IV and
+ * associated data), iov[1] the source buffer, iov[2] the destination.
+ * The session's runop handler does the actual crypto work.
+ *
+ * NOTE(review): cop.iv_len is tested against ivsize below but is never
+ * assigned from the user-supplied data in this function; unless it is
+ * initialised somewhere not visible here, this reads an uninitialised
+ * stack field -- verify against struct crypto_item_op's users.
+ */
+static int cryptodev_user_op(struct csession *ses_ptr, const struct iovec *iov,
+ struct kiocb *iocb)
+{
+ struct crypto_item_op cop;
+ int ivsize;
+ int ret;
+
+ cop.udata = kzalloc(iov[0].iov_len, GFP_KERNEL);
+ if (!cop.udata)
+ return -ENOMEM;
+
+ if (copy_from_user(cop.udata, iov[0].iov_base, iov[0].iov_len)) {
+ printk(KERN_ERR PFX "copy of operation data failed\n");
+ ret = -EFAULT;
+ goto out_cryp;
+ }
+
+ ivsize = ses_ptr->getivsize(ses_ptr);
+ if (cop.iv_len != ivsize) {
+ printk(KERN_ERR PFX "ivsize set incorrectly\n");
+ ret = -EINVAL;
+ goto out_cryp;
+ }
+
+ /* IV first, associated data right after it, inside udata->data. */
+ cop.iv = cop.udata->data;
+ cop.assoc = cop.udata->data + cop.iv_len;
+
+ cop.src_data = iov[1].iov_base;
+ cop.src_size = iov[1].iov_len;
+ cop.dst_data = iov[2].iov_base;
+
+ ret = ses_ptr->runop(ses_ptr, &cop, iocb);
+
+ /* Queued AIO: udata was stored in the async result (see runop
+ * implementations) and is presumably freed by the completion
+ * path -- do not free it here. */
+ if (ret == -EIOCBRETRY || ret == -EIOCBQUEUED)
+ return ret;
+
+out_cryp:
+ kfree(cop.udata);
+ return ret;
+}
+
+/*
+ * ioctl entry point: CIOCGSESSION binds a new crypto session to this
+ * fd (at most one per fd); CIOCKEY (re)sets the session key.
+ */
+static int cryptodev_ioctl(struct inode *inode, struct file *filp,
+			   unsigned int cmd, unsigned long arg)
+{
+	struct csession *ses_ptr;
+	struct cryptodev_ctx *ctx = filp->private_data;
+	int ret;
+
+	if (!ctx) {
+		printk(KERN_ERR PFX "Context Not set for fd\n");
+		return -EINVAL;
+	}
+
+	switch (cmd) {
+	case CIOCGSESSION:
+		mutex_lock(&ctx->lock);
+		ses_ptr = ctx->session;
+		if (ses_ptr) {
+			printk(KERN_ERR PFX "Session data already set\n");
+			mutex_unlock(&ctx->lock);
+			return -EINVAL;
+		}
+		ses_ptr = cryptodev_user_create_session((void *) arg);
+		if (!IS_ERR(ses_ptr)) {
+			ctx->session = ses_ptr;
+			ret = 0;
+		} else
+			ret = PTR_ERR(ses_ptr);
+		mutex_unlock(&ctx->lock);
+		return ret;
+	case CIOCKEY:
+		/* Take ctx->lock as the session-creation path does, so
+		 * ctx->session cannot change underneath us (this read
+		 * was previously unlocked). */
+		mutex_lock(&ctx->lock);
+		ses_ptr = ctx->session;
+		if (!ses_ptr) {
+			printk(KERN_ERR PFX "session data does not exist\n");
+			mutex_unlock(&ctx->lock);
+			return -EINVAL;
+		}
+		ret = crypto_dev_user_setkey(ses_ptr, (void *) arg);
+		mutex_unlock(&ctx->lock);
+		return ret;
+	default:
+		printk(KERN_ERR PFX "un-supported command 0x%08X\n", cmd);
+		return -EINVAL;
+	}
+}
+
+/*
+ * write(2)/aio_write entry point: dispatch one crypto operation on the
+ * fd's session -- synchronously for ordinary writes, asynchronously
+ * for real AIO (retries funnel through cryptodev_aio_write_retry).
+ *
+ * NOTE(review): "cyptodev" (missing 'r') is a typo in the symbol name,
+ * kept as-is because the fops table references it by this name.
+ * NOTE(review): ctx->session is read here without ctx->lock, unlike
+ * the ioctl path -- confirm this cannot race session creation.
+ */
+static int cyptodev_aio_write(struct kiocb *iocb, const struct iovec *iov,
+ unsigned long nr_segs, loff_t o)
+{
+ struct file *filp = iocb->ki_filp;
+ struct cryptodev_ctx *ctx = filp->private_data;
+ struct csession *ses_ptr;
+
+ if (!ctx) {
+ printk(KERN_ERR PFX "Context Not set for fd\n");
+ return -EINVAL;
+ }
+
+ ses_ptr = ctx->session;
+ if (!ses_ptr) {
+ printk(KERN_ERR PFX "session data does not exist\n");
+ return -EINVAL;
+ }
+
+ if (is_sync_kiocb(iocb)) {
+ CDPRINTK(2, KERN_INFO, "Synchronous call\n");
+ return cryptodev_user_op(ses_ptr, iov, NULL);
+ } else {
+ CDPRINTK(2, KERN_INFO, "Asynchronous call\n");
+ iocb->ki_retry = cryptodev_aio_write_retry;
+ return cryptodev_user_op(ses_ptr, iov, iocb);
+ }
+}
+
+/* File operations for the /dev/crypto misc device.
+ * NOTE(review): both globals below could be static unless another
+ * file references them. */
+struct file_operations cryptodev_fops = {
+ .owner = THIS_MODULE,
+ .open = cryptodev_open,
+ .release = cryptodev_release,
+ .ioctl = cryptodev_ioctl,
+ .aio_write = cyptodev_aio_write,
+};
+
+/* The /dev/crypto misc device node itself. */
+struct miscdevice cryptodev = {
+ .minor = CRYPTODEV_MINOR,
+ .name = "crypto",
+ .fops = &cryptodev_fops,
+};
+
+/* Register the /dev/crypto misc device; returns 0 on success or the
+ * misc core's error code. */
+static int cryptodev_register(void)
+{
+	int rc;
+
+	rc = misc_register(&cryptodev);
+	if (rc)
+		printk(KERN_ERR PFX
+		       "failed to register /dev/crypto error %d \n", rc);
+	return rc;
+}
+
+/* Remove the /dev/crypto misc device registered at init. */
+static void cryptodev_deregister(void)
+{
+ misc_deregister(&cryptodev);
+}
+
+/**
+ * Module init/exit
+ *
+ */
+int __init cryptodev_init(void)
+{
+	int rc;
+
+	rc = cryptodev_register();
+	if (rc == 0)
+		printk(KERN_INFO "User space CryptoAPI driver v%s loaded\n",
+		       CRYPTODEV_VER);
+	return rc;
+}
+
+/* Module unload: unregister the /dev/crypto misc device. */
+void __exit cryptodev_exit(void)
+{
+ cryptodev_deregister();
+ printk(KERN_INFO "User space CryptoAPI driver v%s unloaded\n",
+ CRYPTODEV_VER);
+}
+
+module_init(cryptodev_init);
+module_exit(cryptodev_exit);
+
+MODULE_AUTHOR("Shasi Pulijala <[email protected]>");
+MODULE_DESCRIPTION("Linux CryptoAPI user space driver");
+MODULE_LICENSE("GPL");
diff --git a/include/linux/cryptodev.h b/include/linux/cryptodev.h
new file mode 100644
index 0000000..11996b0
--- /dev/null
+++ b/include/linux/cryptodev.h
@@ -0,0 +1,79 @@
+/****************************************************************************
+ * cryptodev.h
+ *
+ * Linux CryptoAPI user space interface module
+ *
+ * Copyright (c) 2008 Shasi Pulijala <[email protected]>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ *
+ * Detail Description:
+ * This file defines ioctl structures for the Linux CryptoAPI interface. It
+ * provides user space applications access into the Linux CryptoAPI
+ * functionalities.
+ *
+ ****************************************************************************
+ */
+
+#ifndef __CRYPTODEV_H__
+#define __CRYPTODEV_H__
+
+/**
+ * @struct session_op
+ * @brief ioctl parameter to create a session
+ *
+ * The fixed header gives the sizes of the variable-length fields that
+ * follow back to back in data[].
+ * NOTE(review): the exact ordering of the fields inside data[] is
+ * defined by the session-creation code, which is not visible here --
+ * confirm before relying on any particular layout.
+ */
+struct session_op {
+
+ __u16 algo_size; /* algorithm-name length (by field name; verify) */
+ __u16 key_size; /* cipher key length*/
+ __u16 hmackey_size; /* mac key length*/
+ __u16 icv_size; /*authsize (ccm, gcm)*/
+ __u8 data[0]; /* variable-length payload, partitioned per sizes */
+};
+
+/**
+ * @struct key_op
+ * @brief ioctl parameter to change the session key
+ *
+ * data[] holds exactly ksize bytes of key material immediately after
+ * the header (see crypto_dev_user_setkey).
+ */
+struct key_op {
+ __u16 ksize; /* number of key bytes in data[] */
+ __u8 data[0];
+};
+
+/**
+ * @struct crypt_op
+ * @brief ioctl parameter to request a crypt/decrypt operation
+ * against a session
+ *
+ * The kernel side reads the IV from the start of data[] and the
+ * associated data right after it (see cryptodev_user_op).
+ */
+#define CRYPTO_MAX_DATA_LEN (64*1024 - 1)
+struct crypt_op {
+#define COP_NONE 0
+#define COP_ENCRYPT 1
+#define COP_DECRYPT 2
+ __u16 op; /* i.e. COP_ENCRYPT */
+ __u16 flags;
+ __u16 iv_size; /* bytes of IV at the start of data[] */
+ __u16 assoc_size; /* bytes of associated data following the IV */
+ __u8 data[0]; /* must be big enough for chosen MAC */
+};
+
+/* create crypto session */
+#define CIOCGSESSION _IOWR('c', 101, struct session_op)
+/* change crypto key */
+#define CIOCKEY _IOWR('c', 102, struct key_op)
+
+#endif
--
1.5.5



2008-11-21 14:08:08

by Evgeniy Polyakov

[permalink] [raw]
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

Hi.

On Mon, Nov 17, 2008 at 03:31:42PM -0800, Shasi Pulijala ([email protected]) wrote:
> This patch v8 includes the code that prevents synchronous calls from freeing data when request completion is interrupted, while the data may still be accessed by the crypto code in parallel.
> From: Shasi Pulijala <[email protected]>

Looks ok, I did not find any obvious error.

--
Evgeniy Polyakov

2009-01-13 05:08:49

by Herbert Xu

[permalink] [raw]
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

On Mon, Nov 17, 2008 at 03:31:42PM -0800, Shasi Pulijala wrote:
>
> This patch v8 includes the code that prevents synchronous calls from freeing data when request completion is interrupted, while the data may still be accessed by the crypto code in parallel.
> From: Shasi Pulijala <[email protected]>
>
>
> Signed-off-by: Shasi Pulijala <[email protected]>
> Acked-by: Loc Ho <[email protected]>

I'm only going to comment on the user-space interface as without
an agreement on that there is no point in talking about the
implementation.

Now at the very outset I gave a list of requirements that the
user-space interface should satisfy. One of them was the ability
to extend the API to cover arbitrary operations. This is very
important IMO as otherwise we may have to come up with an entirely
new interface should we extend the kernel crypto API with a new
algorithm type or operation.

> +/**
> + * @struct session_op
> + * @brief ioctl parameter to create a session
> + *
> + *
> + */
> +struct session_op {
> +
> + __u16 algo_size;
> + __u16 key_size; /* cipher key length*/
> + __u16 hmackey_size; /* mac key length*/
> + __u16 icv_size; /*authsize (ccm, gcm)*/
> + __u8 data[0];
> +};
> +
> +/**
> + * @struct key_op
> + * @brief ioctl parameter to change the session key
> + *
> + */
> +struct key_op {
> + __u16 ksize;
> + __u8 data[0];
> +};

As such I think this interface is inadequate by the fact that
it is based completely on the traditional crypto operations.

What I'd like to see is a much more abstract interface where
details such as the meaning of arguments or their format is
left up to each algorithm type to decide.

Now I'm happy with the underlying transport that you have come
up with, i.e., the use of IOVs to delineate arguments for an
individual operation and the use of ioctls to set session/tfm
attributes.

So what I think we need to do here is to change the protocol
on top of this transport to be more generic.

For instance, for session setup, rather than having multiple
ioctl's for each type, I'd like to see a single entry point
that carries the configuration info in a type-specific format
(perhaps netlink like) which we then pass to each algorithm type
to decode.

Similarly, for each operations the generic layer should simply
pass the IOVs to each individual type which can do whatever they
want with it.

At the end of the day I'd like to have an interface where if
I were to add a new algorithm type (like rng that we just added),
I could simply write the hooks for it and it'll work without
me having to modify the guts of the interface.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2009-02-07 00:13:46

by Loc Ho

[permalink] [raw]
Subject: RE: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

Hi Herbert,

I had looked into what you suggested. What about this implementation design below. I will show an implementation for ahash type only:

File: algapi.h

struct crypto_type {
unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
void (*exit)(struct crypto_tfm *tfm);
void (*show)(struct seq_file *m, struct crypto_alg *alg);
int (*uspace_setparam)(struct crypto_tfm *tfm, void *param, u32 param_size);
int (*uspace_op)(struct crypto_tfm *tfm,
const struct iovec *iov, u32 iov_cnt,
crypto_completion_t cb, void *ctx);
};

Please note the extra function uspace_setparam and uspace_op. Please note that
the parameter 'void *param' from function uspace_setparam are kernel space pointer.
The parameter 'const struct iovec *iov' are kernel space pointer. The handling of user
space pointers will be handled by cryptodev driver. Linux CryptoAPI algorithm type will
always expect kernel accessible pointers.

File: cryptodev.h

struct uspace_session {
__u8 alg_name[64];
__u32 front_type; /* I don't believe we need this - leave for now */
__u32 type; /* We might need to allow a value of 0 for any */
__u32 mask; /* We might need to allow a value of 0 for any */
};

struct uspace_param {
__u16 size; /* We need this parameter as I/O control is only one pointer */
__u8 data[0]; /* Variable size - parameters are crypto type specific */
};

struct uspace_param_ahash { /* This is the data field of struct uspace_param */
__u16 hmackey_size;
__u8 data[0];
};

struct uspace_param_aead { /* This is the data field of struct uspace_param */
__u16 key_size;
__u16 hmackey_size;
__u8 data[0];
};

/* Create crypto session */
#define CIOCGSESSION _IOWR('c', 101, struct session_op)
/* Set algorithm parameters */
#define CIOCPARAM _IOWR('c', 102, struct key_op)

struct uspace_aead_op { /* Per operation for aead type */
#define COP_NONE 0
#define COP_ENCRYPT 1
#define COP_DECRYPT 2
__u16 op; /* i.e. COP_ENCRYPT */
__u16 flags;
__u16 iv_size;
__u16 assoc_size;
__u8 data[0]; /* parameter for aead per operation */
};

File: cryptodev.c

Ioctl function:

switch (cmd) {
case CIOCGSESSION:
a bunch of cryptodev code as before to decode alg_name, alg_type, alg_mask...
/* Now create the session */
ca = __crypto_alg_lookup(alg_name, alg_type, alg_mask);
tfm = __crypto_alloc_tfm(ca, alg_type, alg_mask);
/* Now save ca and tfm into cryptodev context structure */
/* With this implementation, it relies on Linux CryptoAPI to create the tfm and
no extra switch, if, and etc statement. */
break;
case CIOCPARAM:
a bunch of cryptodev code as before to decode 'struct uspace_param'
/* Extract the alg type and tfm */
tftm = ...
ca = ...
ca->cra_type->uspace_setparam(tfm, param, param_size);
break;
}

Write function (Per a operation):

Cryptodev_write(...)
{
A bound of cryptodev code as before to map in the vector buffers.
For ahash, it is just the data to be hashed. For aead, there is
The data buffer and aead parameters. Plus cryptodev house works
/* At this point, we have two pointers accessible by kernel - data to
be hashed and result pointer - let call this iov[2] */
/* Extract tftm and crypto alg */
ca = ...
tftm = ...
ca->cra_type->uspace_op(tftm, iov, 2, cd_write_cb, context_ptr);
return;
}

... cd_write_cb(...)
{
A bound of cryptodev code as before... If pages are pinned, then
not much to do. If not, do copy to user space function calls.
}

File ahash.c:


static int crypto_ahash_us_setparam(struct crypto_tfm *tfm,
void *param, u32 param_size)
{
struct crypto_ahash *atfm = __crypto_ahash_cast(tfm);
struct uspace_ahash_param *usp = param;
int rc;

rc = crypto_ahash_setkey(atfm, usp->data, usp->hmackey_size);
::::
return rc;
}

static int crypto_ahash_us_op(struct crypto_tfm *tfm, const struct iovec *iov,
u32 iov_cnt, crypto_completion_t cb, void *ctx)
{
struct crypto_ahash * atfm = __crypto_ahash_cast(tfm);
struct ahash_request * req;
struct scatterlist * ssg;
int len;
int len_sg;
int rc;

len = sizeof(struct ahash_request) + crypto_ahash_reqsize(atfm);
len = ALIGN(len, __alignof__(struct scatterlist));
len_sg = len;
len += sizeof(struct scatterlist);
req = kzalloc(len, GFP_KERNEL);
if (req == NULL)
return -ENOMEM;

/* For this example, iov[0] is data to be hashed and iov[1] is result buffer */
ssg = (struct scatterlist *) ((u8 *) req + len_sg);
sg_init_one(ssg, iov[0].iov_base, iov[0].iov_len);

ahash_request_set_tfm(req, atfm);
ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG, cb, ctx);
ahash_request_set_crypt(req, ssg, iov[1].iov_base, iov[0].iov_len);

rc = crypto_ahash_digest(req);
switch (rc) {
case -EINPROGRESS:
break;
case 0:
case -EBUSY:
default:
kfree(req);
break;
}
return rc;
}

const struct crypto_type crypto_ahash_type = {
.ctxsize = crypto_ahash_ctxsize,
.init = crypto_init_ahash_ops,
#ifdef CONFIG_PROC_FS
.show = crypto_ahash_show,
#endif
.uspace_setparam = crypto_ahash_us_setparam,
.uspace_op = crypto_ahash_us_op
};

This addresses the algorithm abstraction as well as support for any CryptoAPI
algorithm types. All user space buffers mapping to kernel space will still be handled
by CryptoDev driver. This avoids creating a bunch of complication for each type of
CryptoAPI type of algorithms.

-Loc

-----Original Message-----
From: Herbert Xu [mailto:[email protected]]
Sent: Tuesday, January 27, 2009 8:47 PM
To: Shasi Pulijala
Cc: Loc Ho
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

On Tue, Jan 20, 2009 at 11:40:21PM -0800, Shasi Pulijala wrote:
> Hi Herbert,
>
> Thanks for all your comments and suggestions.
> As per your comments we have come up with the following design for the
> user space design. The below are the structures and
> API's that provide the user interface to the cryptodev implementation.
> Could you please review this once and let me know
> if it's a better abstract interface or if you see any problems with it.

Thanks for the quick turn-around!

> #define CRYPTODEV_NCRYPTO Max num of Operations

If by operations you mean things like encrypt/decrypt/compress/decompress
then we shouldn't have a global ID space for them. They should be
per-type. That is, you have a separate ID space fir ciphers, hashes,
and so on.

> static struct cryptodev_link_handler
> *cryptodev_msg_handlers[CRYPTODEV_NCRYPTO];
> int cryptodev_register_handlers(int protocol,
> session_link_handler sessit, operation_link_handler
> opit)

Is this the kernel API? If so I don't think we need any new
registration interface since we can just go through the existing
crypto_type object.

So what would happen is that upon initialisation user-space gives
us a 4-tuple of (name, frontend type, type, mask). We use that
to create the tfm object which would give us a pointer to the
frontend type (note that we don't currently have that pointer
but we should add it to crypto_tfm) that should then contain all
the hooks to further parse the requests from user-space.

After initialisation we would give user-space some sort of an ID
to the tfm object so all subsequent operations simply need to give
us the ID to get to the tfm and the rest of the info.

In fact we can just generate one file descriptor per tfm since
that gives us a natural limit on how many tfms you can allocate,
plus you can poll on it without being overwhelmed.

> struct cryptodev_cipher_op {
> #define COP_NONE 0
> #define COP_ENCRYPT 1
> #define COP_DECRYPT 2
> __u16 op; /* i.e. COP_ENCRYPT */
> __u16 flags;
> __u16 iv_size;
> __u8 data[0];
> }

It's probably best to separate the meta-data from the data since
making them contiguous is not always easy. But otherwise this
looks fine to me.

> /** For Session **/
>
> struct cryptodev_param_algo {
> int type;
> int param_len;
> char param[0];
> };
>
> struct cryptodev_cipher_algo {
> char alg_name[64];
> __u16 alg_key_len;
> char alg_key[0];
> };
>
> struct cryptodev_aead_algo {
> char alg_name[64];
> __u16 alg_key_len;
> __u16 alg_icv_len;
> char alg_key[0];
> };

As I said above, I think we should have a generic 4-tuple struct
that is used to allocate the tfm, after which we can just use the
file descriptor.

> Session Related API's
>
> static int cryptodev_ioctl(struct inode *inode, struct file *filp,
> unsigned int cmd, unsigned long arg)
> {
> / *This will called when the user does an ioctl session creation */
> This routine will do:
> Creating the session,
> copy_from_user(cryptodev_param_algo, (void *) arg, sizeof(struct
> cryptodev_param_algo))
> Check the type value in cryptodev_param_algo structure, and call
> the appropriate
> session creation handler
>
> }

Right, I think we really only need to one ioctl to allocate the tfm,
which returns a new file descriptor and everything else can be done
with read/write.

Thanks,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2009-02-23 09:36:08

by Herbert Xu

[permalink] [raw]
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

On Fri, Feb 06, 2009 at 04:13:40PM -0800, Loc Ho wrote:
>
> struct crypto_type {
> unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
> int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
> void (*exit)(struct crypto_tfm *tfm);
> void (*show)(struct seq_file *m, struct crypto_alg *alg);
> int (*uspace_setparam)(struct crypto_tfm *tfm, void *param, u32 param_size);
> int (*uspace_op)(struct crypto_tfm *tfm,
> const struct iovec *iov, u32 iov_cnt,
> crypto_completion_t cb, void *ctx);
> };

I'd like to see a name field. It's better to have user-space
pass through a string for the type instead of an ID.

The reason you want user-space to pass that through is so that
you can locate the crypto_type object and then call crypto_alloc_tfm
on it..

Also the other two functions should take a void * instead of
crypto_tfm * since crypto_alloc_tfm now returns that.

> struct uspace_session {
> __u8 alg_name[64];
> __u32 front_type; /* I don't believe we need this - leave for now */

So instead of front_type I'd like to see front_type_name.

> struct uspace_param {
> __u16 size; /* We need this parameter as I/O control is only one pointer */
> __u8 data[0]; /* Variable size - parameters are crypto type specific */
> };
>
> struct uspace_param_ahash { /* This is the data field of struct uspace_param */
> __u16 hmackey_size;
> __u8 data[0];
> };
>
> struct uspace_param_aead { /* This is the data field of struct uspace_param */
> __u16 key_size;
> __u16 hmackey_size;
> __u8 data[0];
> };
>
> /* Create crypto session */
> #define CIOCGSESSION _IOWR('c', 101, struct session_op)
> /* Set algorithm parameters */
> #define CIOCPARAM _IOWR('c', 102, struct key_op)
>
> struct uspace_aead_op { /* Per operation for aead type */
> #define COP_NONE 0
> #define COP_ENCRYPT 1
> #define COP_DECRYPT 2
> __u16 op; /* i.e. COP_ENCRYPT */
> __u16 flags;
> __u16 iv_size;
> __u16 assoc_size;
> __u8 data[0]; /* parameter for aead per operation */
> };

OK, I presume the idea is to stuff this into the iov that's passed
to uspace_op?

I'm starting to think that write(2) isn't really what we want. The
reason is that each operation always has two types of data: obviously
we have the data itself, plus ancillary information like this struct
right here.

So how about switching over to the socket interface:

1. Create fd.

fd = socket(AF_ALGO, SOCK_DGRAM, 0);

2. Allocate tfm, alg_name would be the uspace_session struct.

connect(fd, alg_name, alg_name_len);

3. Set parameters.

setsockopt(fd, ..., param, param_len);

4. Operate.

sendmsg(fd, msg, flags);

Here struct msghdr provides for both the IOV array and space for
ancillary information.

Then the uspace_op function would just need to take a msg plus
the callback stuff.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2009-02-23 13:53:48

by Herbert Xu

[permalink] [raw]
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

On Mon, Feb 23, 2009 at 05:35:58PM +0800, Herbert Xu wrote:
>
> 4. Operate.
>
> sendmsg(fd, msg, flags);
>
> Here struct msghdr provides for both the IOV array and space for
> ancillary information.

In fact we should take this one step further. Instead of having
write/send do both input as well as output, it should just do the
former. That is, we should call read/recv to get the output.

This might seem a step backwards, but in fact it's the only sane
way to do zero-copy. In order to get zero copy with this, you'd
use splice instead of write (the setup can be done with the usual
corking method), and the receive side would use vmsplice + splice.

You might argue that this is too much overhead. However, it isn't
because

1) For the cases where you have less than a page of data, just
use send/recv as the overhead of the copy is dwarfed by the overhead
of the rest of the crypto operation.

2) Where zero-copy does matter the setup overhead is immaterial.

Of course to make vmsplice + splice work, we don't want to start
the operation until the first read request from the user is issued.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2009-02-25 03:26:41

by Herbert Xu

[permalink] [raw]
Subject: Re: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

On Mon, Feb 23, 2009 at 10:46:43AM -0800, Loc Ho wrote:
>
> I'd like to see a name field. It's better to have user-space
> pass through a string for the type instead of an ID.
>
> The reason you want user-space to pass that through is so that
> you can locate the crypto_type object and then call crypto_alloc_tfm
> on it..
>
> [Loc Ho]
> The name field would not be known here. It will be at the cryptodev layer as it is the layer that creates the tfm. In the above two functions, the parameter tfm has already been created.

Well it would be known if you added a name field to uspace_session
below :)

> Also the other two functions should take a void * instead of
> crypto_tfm * since crypto_alloc_tfm now returns that.
>
> [Loc Ho]
> The functions ctxsize, init, exit, and show already exist. For cryptodev, I added only uspace_setparam and uspace_op. We will change their first parameter to void *.

Right I only meant the new functions. These existing functions
will retain the crypto_tfm.

> > struct uspace_session {
> > __u8 alg_name[64];
> > __u32 front_type; /* I don't believe we need this - leave for now */
>
> So instead of front_type I'd like to see front_type_name.
>
> [Loc Ho]
> Okay... Maybe you see some value having a front_type_name. But as of right now, I only see the need for alg_name, type, and mask.

It makes a difference because for different types we need to call
different construction/alloc functions. In fact, I think we should
add a construction function to the type object too.

> [Loc Ho]
> Okay... This would require the caller to call recvmsg and make use of the standard async/sync socket interface. For the async interface, the caller will have to call recvmsg. A sendmsg will always return success unless the HW ring is full. For the sync interface, the function will block until the operation has completed. If we switch to this interface, are you going to stick with this interface and apply this - at least until there is a better reason to change? Even if there is a good reason to change, I would like to see a version available in the crypto tree if not the Linux kernel release tree. We will also look at splice if time permits.

Well once an interface hits Linus's tree that's it, game over :)
So that's why we're having all these discussions in order to make sure
that we don't have any regrets.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt

2009-02-23 18:46:46

by Loc Ho

[permalink] [raw]
Subject: RE: [PATCH 1/1 v8] Add CryptoAPI User Interface Support v8

Hi,

See inline...

-Loc

>
> struct crypto_type {
> unsigned int (*ctxsize)(struct crypto_alg *alg, u32 type, u32 mask);
> int (*init)(struct crypto_tfm *tfm, u32 type, u32 mask);
> void (*exit)(struct crypto_tfm *tfm);
> void (*show)(struct seq_file *m, struct crypto_alg *alg);
> int (*uspace_setparam)(struct crypto_tfm *tfm, void *param, u32 param_size);
> int (*uspace_op)(struct crypto_tfm *tfm,
> const struct iovec *iov, u32 iov_cnt,
> crypto_completion_t cb, void *ctx);
> };

I'd like to see a name field. It's better to have user-space
pass through a string for the type instead of an ID.

The reason you want user-space to pass that through is so that
you can locate the crypto_type object and then call crypto_alloc_tfm
on it..

[Loc Ho]
The name field would not be known here. It will be at the cryptodev layer as it is the layer that creates the tfm. In the above two functions, the parameter tfm has already been created.

Also the other two functions should take a void * instead of
crypto_tfm * since crypto_alloc_tfm now returns that.

[Loc Ho]
The functions ctxsize, init, exit, and show already exist. For cryptodev, I added only uspace_setparam and uspace_op. We will change their first parameter to void *.


> struct uspace_session {
> __u8 alg_name[64];
> __u32 front_type; /* I don't believe we need this - leave for now */

So instead of front_type I'd like to see front_type_name.

[Loc Ho]
Okay... Maybe you see some value having a front_type_name. But as of right now, I only see the need for alg_name, type, and mask.

> struct uspace_param {
> __u16 size; /* We need this parameter as I/O control is only one pointer */
> __u8 data[0]; /* Variable size - parameters are crypto type specific */
> };
>
> struct uspace_param_ahash { /* This is the data field of struct uspace_param */
> __u16 hmackey_size;
> __u8 data[0];
> };
>
> struct uspace_param_aead { /* This is the data field of struct uspace_param */
> __u16 key_size;
> __u16 hmackey_size;
> __u8 data[0];
> };
>
> /* Create crypto session */
> #define CIOCGSESSION _IOWR('c', 101, struct session_op)
> /* Set algorithm parameters */
> #define CIOCPARAM _IOWR('c', 102, struct key_op)
>
> struct uspace_aead_op { /* Per operation for aead type */
> #define COP_NONE 0
> #define COP_ENCRYPT 1
> #define COP_DECRYPT 2
> __u16 op; /* i.e. COP_ENCRYPT */
> __u16 flags;
> __u16 iv_size;
> __u16 assoc_size;
> __u8 data[0]; /* parameter for aead per operation */
> };

OK, I presume the idea is to stuff this into the iov that's passed
to uspace_op?

[Loc Ho]
Yes...

I'm starting to think that write(2) isn't really what we want. The
reason is that each operation always has two types of data: obviously
we have the data itself, plus ancillary information like this struct
right here.

So how about switching over to the socket interface:

1. Create fd.

fd = socket(AF_ALGO, SOCK_DGRAM, 0);

2. Allocate tfm, alg_name would be the uspace_session struct.

connect(fd, alg_name, alg_name_len);

3. Set parameters.

setsockopt(fd, ..., param, param_len);

4. Operate.

sendmsg(fd, msg, flags);
[Loc Ho]
Okay... This would require the caller to call recvmsg and make use of the standard async/sync socket interface. For the async interface, the caller will have to call recvmsg. A sendmsg will always return success unless the HW ring is full. For the sync interface, the function will block until the operation has completed. If we switch to this interface, are you going to stick with this interface and apply this - at least until there is a better reason to change? Even if there is a good reason to change, I would like to see a version available in the crypto tree if not the Linux kernel release tree. We will also look at splice if time permits.

Here struct msghdr provides for both the IOV array and space for
ancillary information.

Then the uspace_op function would just need to take a msg plus
the callback stuff.

Cheers,
--
Visit Openswan at http://www.openswan.org/
Email: Herbert Xu ~{PmV>HI~} <[email protected]>
Home Page: http://gondor.apana.org.au/~herbert/
PGP Key: http://gondor.apana.org.au/~herbert/pubkey.txt