Return-Path: 
Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand
	id S1756563Ab3I2TeO (ORCPT );
	Sun, 29 Sep 2013 15:34:14 -0400
Received: from mail.linuxfoundation.org ([140.211.169.12]:59864 "EHLO
	mail.linuxfoundation.org" rhost-flags-OK-OK-OK-OK) by vger.kernel.org
	with ESMTP id S1756175Ab3I2T3u (ORCPT );
	Sun, 29 Sep 2013 15:29:50 -0400
From: Greg Kroah-Hartman 
To: linux-kernel@vger.kernel.org
Cc: Greg Kroah-Hartman , stable@vger.kernel.org,
	Simo Sorce , "J. Bruce Fields" 
Subject: [ 67/71] rpc: fix huge kmallocs in gss-proxy
Date: Sun, 29 Sep 2013 12:28:19 -0700
Message-Id: <20130929192648.105585695@linuxfoundation.org>
X-Mailer: git-send-email 1.8.4.6.g82e253f.dirty
In-Reply-To: <20130929192643.539596256@linuxfoundation.org>
References: <20130929192643.539596256@linuxfoundation.org>
User-Agent: quilt/0.60-1
Sender: linux-kernel-owner@vger.kernel.org
List-ID: 
X-Mailing-List: linux-kernel@vger.kernel.org
Content-Length: 3566
Lines: 113

3.11-stable review patch.  If anyone has any objections, please let me know.

------------------

From: "J. Bruce Fields" 

commit 9dfd87da1aeb0fd364167ad199f40fe96a6a87be upstream.

The reply to a gssproxy can include up to NGROUPS_MAX gid's, which will
take up more than a page.  We therefore need to allocate an array of
pages to hold the reply instead of trying to allocate a single huge
buffer.

Tested-by: Simo Sorce 
Signed-off-by: J. Bruce Fields 
Signed-off-by: Greg Kroah-Hartman 

---
 net/sunrpc/auth_gss/gss_rpc_upcall.c |   30 ++++++++++++++++++++++++++++++
 net/sunrpc/auth_gss/gss_rpc_xdr.c    |    3 +++
 net/sunrpc/auth_gss/gss_rpc_xdr.h    |    5 ++++-
 3 files changed, 37 insertions(+), 1 deletion(-)

--- a/net/sunrpc/auth_gss/gss_rpc_upcall.c
+++ b/net/sunrpc/auth_gss/gss_rpc_upcall.c
@@ -213,6 +213,30 @@ static int gssp_call(struct net *net, st
 	return status;
 }
 
+static void gssp_free_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	int i;
+
+	for (i = 0; i < arg->npages && arg->pages[i]; i++)
+		__free_page(arg->pages[i]);
+}
+
+static int gssp_alloc_receive_pages(struct gssx_arg_accept_sec_context *arg)
+{
+	int i;
+
+	arg->npages = DIV_ROUND_UP(NGROUPS_MAX * 4, PAGE_SIZE);
+	arg->pages = kzalloc(arg->npages * sizeof(struct page *), GFP_KERNEL);
+
+	for (i=0; i < arg->npages; i++) {
+		arg->pages[i] = alloc_page(GFP_KERNEL);
+		if (arg->pages[i] == NULL) {
+			gssp_free_receive_pages(arg);
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
 /*
  * Public functions
  */
@@ -261,10 +285,16 @@ int gssp_accept_sec_context_upcall(struc
 	arg.context_handle = &ctxh;
 	res.output_token->len = GSSX_max_output_token_sz;
 
+	ret = gssp_alloc_receive_pages(&arg);
+	if (ret)
+		return ret;
+
 	/* use nfs/ for targ_name ? */
 	ret = gssp_call(net, &msg);
 
+	gssp_free_receive_pages(&arg);
+
 	/* we need to fetch all data even in case of error so
 	 * that we can free special strctures is they have been allocated */
 	data->major_status = res.status.major_status;
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.c
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.c
@@ -780,6 +780,9 @@ void gssx_enc_accept_sec_context(struct
 	/* arg->options */
 	err = dummy_enc_opt_array(xdr, &arg->options);
 
+	xdr_inline_pages(&req->rq_rcv_buf,
+		PAGE_SIZE/2 /* pretty arbitrary */,
+		arg->pages, 0 /* page base */, arg->npages * PAGE_SIZE);
 done:
 	if (err)
 		dprintk("RPC: gssx_enc_accept_sec_context: %d\n", err);
--- a/net/sunrpc/auth_gss/gss_rpc_xdr.h
+++ b/net/sunrpc/auth_gss/gss_rpc_xdr.h
@@ -147,6 +147,8 @@ struct gssx_arg_accept_sec_context {
 	struct gssx_cb *input_cb;
 	u32 ret_deleg_cred;
 	struct gssx_option_array options;
+	struct page **pages;
+	unsigned int npages;
 };
 
 struct gssx_res_accept_sec_context {
@@ -240,7 +242,8 @@ int gssx_dec_accept_sec_context(struct r
 					2 * GSSX_max_princ_sz + \
 					8 + 8 + 4 + 4 + 4)
 #define GSSX_max_output_token_sz 1024
-#define GSSX_max_creds_sz (4 + 4 + 4 + NGROUPS_MAX * 4)
+/* grouplist not included; we allocate separate pages for that: */
+#define GSSX_max_creds_sz (4 + 4 + 4 /* + NGROUPS_MAX*4 */)
 #define GSSX_RES_accept_sec_context_sz (GSSX_default_status_sz + \
 					GSSX_default_ctx_sz + \
 					GSSX_max_output_token_sz + \
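
For readers following the change outside the kernel tree, here is a small
standalone sketch (not part of the patch) of the sizing arithmetic behind
gssp_alloc_receive_pages(); it assumes the usual Linux values NGROUPS_MAX =
65536 and a 4 KiB PAGE_SIZE, and mirrors the kernel's DIV_ROUND_UP() rounding:

/* Userspace illustration only, not kernel code.  Assumes NGROUPS_MAX = 65536
 * and PAGE_SIZE = 4096, the common Linux values. */
#include <stdio.h>

#define NGROUPS_MAX 65536	/* kernel limit on supplementary gids */
#define PAGE_SIZE   4096	/* assumed page size */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	/* each gid is a 32-bit value, hence the "* 4" in the patch */
	unsigned long bytes  = (unsigned long)NGROUPS_MAX * 4;
	unsigned long npages = DIV_ROUND_UP(bytes, PAGE_SIZE);

	printf("worst-case grouplist: %lu bytes -> %lu pages\n", bytes, npages);
	/* prints: worst-case grouplist: 262144 bytes -> 64 pages */
	return 0;
}

With 4 KiB pages this comes to 64 pages (256 KiB), which is why the reply is
received into an array of pages rather than a single huge kmalloc().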

-- 
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@vger.kernel.org
More majordomo info at  http://vger.kernel.org/majordomo-info.html
Please read the FAQ at  http://www.tux.org/lkml/