From: NeilBrown
To: Oleg Drokin, Greg Kroah-Hartman, James Simmons, Andreas Dilger
Date: Fri, 02 Mar 2018 10:31:25 +1100
Subject: [PATCH 09/17] staging: lustre: ldlm: use delayed_work for pools_recalc
Cc: Linux Kernel Mailing List, Lustre Development List
Message-ID: <151994708534.7628.3824921218925924808.stgit@noble>
In-Reply-To: <151994679573.7628.1024109499321778846.stgit@noble>
References: <151994679573.7628.1024109499321778846.stgit@noble>
User-Agent: StGit/0.17.1-dirty

ldlm currently has a kthread which wakes up every so often and calls
ldlm_pools_recalc().
The thread is started and stopped, but no other external interactions
happen.

This can trivially be replaced by a delayed_work if we have
ldlm_pools_recalc() reschedule the work rather than just report when to
do that.

Signed-off-by: NeilBrown
---
 drivers/staging/lustre/lustre/ldlm/ldlm_pool.c | 99 +++---------------------
 1 file changed, 11 insertions(+), 88 deletions(-)

diff --git a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
index a0e486b57e08..53b8f33e54b5 100644
--- a/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
+++ b/drivers/staging/lustre/lustre/ldlm/ldlm_pool.c
@@ -784,9 +784,6 @@ static int ldlm_pool_granted(struct ldlm_pool *pl)
 	return atomic_read(&pl->pl_granted);
 }
 
-static struct ptlrpc_thread *ldlm_pools_thread;
-static struct completion ldlm_pools_comp;
-
 /*
  * count locks from all namespaces (if possible). Returns number of
  * cached locks.
@@ -899,8 +896,12 @@ static unsigned long ldlm_pools_cli_scan(struct shrinker *s,
 			       sc->gfp_mask);
 }
 
-static int ldlm_pools_recalc(enum ldlm_side client)
+static void ldlm_pools_recalc(struct work_struct *ws);
+static DECLARE_DELAYED_WORK(ldlm_recalc_pools, ldlm_pools_recalc);
+
+static void ldlm_pools_recalc(struct work_struct *ws)
 {
+	enum ldlm_side client = LDLM_NAMESPACE_CLIENT;
 	struct ldlm_namespace *ns;
 	struct ldlm_namespace *ns_old = NULL;
 	/* seconds of sleep if no active namespaces */
@@ -982,92 +983,19 @@ static int ldlm_pools_recalc(enum ldlm_side client)
 	/* Wake up the blocking threads from time to time. */
 	ldlm_bl_thread_wakeup();
 
-	return time;
-}
-
-static int ldlm_pools_thread_main(void *arg)
-{
-	struct ptlrpc_thread *thread = (struct ptlrpc_thread *)arg;
-	int c_time;
-
-	thread_set_flags(thread, SVC_RUNNING);
-	wake_up(&thread->t_ctl_waitq);
-
-	CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
-	       "ldlm_poold", current_pid());
-
-	while (1) {
-		/*
-		 * Recal all pools on this tick.
-		 */
-		c_time = ldlm_pools_recalc(LDLM_NAMESPACE_CLIENT);
-
-		/*
-		 * Wait until the next check time, or until we're
-		 * stopped.
-		 */
-		wait_event_idle_timeout(thread->t_ctl_waitq,
-					thread_is_stopping(thread) ||
-					thread_is_event(thread),
-					c_time * HZ);
-
-		if (thread_test_and_clear_flags(thread, SVC_STOPPING))
-			break;
-		thread_test_and_clear_flags(thread, SVC_EVENT);
-	}
-
-	thread_set_flags(thread, SVC_STOPPED);
-	wake_up(&thread->t_ctl_waitq);
-
-	CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
-	       "ldlm_poold", current_pid());
-
-	complete_and_exit(&ldlm_pools_comp, 0);
+	schedule_delayed_work(&ldlm_recalc_pools, time * HZ);
 }
 
 static int ldlm_pools_thread_start(void)
 {
-	struct task_struct *task;
-
-	if (ldlm_pools_thread)
-		return -EALREADY;
-
-	ldlm_pools_thread = kzalloc(sizeof(*ldlm_pools_thread), GFP_NOFS);
-	if (!ldlm_pools_thread)
-		return -ENOMEM;
-
-	init_completion(&ldlm_pools_comp);
-	init_waitqueue_head(&ldlm_pools_thread->t_ctl_waitq);
+	schedule_delayed_work(&ldlm_recalc_pools, 0);
 
-	task = kthread_run(ldlm_pools_thread_main, ldlm_pools_thread,
-			   "ldlm_poold");
-	if (IS_ERR(task)) {
-		CERROR("Can't start pool thread, error %ld\n", PTR_ERR(task));
-		kfree(ldlm_pools_thread);
-		ldlm_pools_thread = NULL;
-		return PTR_ERR(task);
-	}
-	wait_event_idle(ldlm_pools_thread->t_ctl_waitq,
-			thread_is_running(ldlm_pools_thread));
 	return 0;
 }
 
 static void ldlm_pools_thread_stop(void)
 {
-	if (!ldlm_pools_thread)
-		return;
-
-	thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
-	wake_up(&ldlm_pools_thread->t_ctl_waitq);
-
-	/*
-	 * Make sure that pools thread is finished before freeing @thread.
-	 * This fixes possible race and oops due to accessing freed memory
-	 * in pools thread.
-	 */
-	wait_for_completion(&ldlm_pools_comp);
-	kfree(ldlm_pools_thread);
-	ldlm_pools_thread = NULL;
+	cancel_delayed_work_sync(&ldlm_recalc_pools);
 }
 
 static struct shrinker ldlm_pools_cli_shrinker = {
@@ -1081,20 +1009,15 @@ int ldlm_pools_init(void)
 	int rc;
 
 	rc = ldlm_pools_thread_start();
-	if (rc)
-		return rc;
-
-	rc = register_shrinker(&ldlm_pools_cli_shrinker);
-	if (rc)
-		ldlm_pools_thread_stop();
+	if (!rc)
+		rc = register_shrinker(&ldlm_pools_cli_shrinker);
 
 	return rc;
 }
 
 void ldlm_pools_fini(void)
 {
-	if (ldlm_pools_thread)
-		unregister_shrinker(&ldlm_pools_cli_shrinker);
+	unregister_shrinker(&ldlm_pools_cli_shrinker);
 	ldlm_pools_thread_stop();
 }
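
For anyone less familiar with the workqueue API, the shape of this conversion is the standard self-rescheduling delayed_work pattern. The fragment below is only an illustrative sketch, not part of the patch; the names (demo_recalc, demo_work, DEMO_INTERVAL) are made up for the example.

/*
 * Illustrative sketch only -- not part of the patch.  Hypothetical
 * names: demo_recalc(), demo_work, DEMO_INTERVAL.
 */
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define DEMO_INTERVAL	(10 * HZ)	/* stand-in for the computed "time" */

static void demo_recalc(struct work_struct *ws);
static DECLARE_DELAYED_WORK(demo_work, demo_recalc);

static void demo_recalc(struct work_struct *ws)
{
	/* ... periodic work goes here (ldlm_pools_recalc() in the patch) ... */

	/* Reschedule ourselves rather than reporting an interval to a kthread. */
	schedule_delayed_work(&demo_work, DEMO_INTERVAL);
}

static int __init demo_init(void)
{
	/* Kick off the first pass immediately, as ldlm_pools_thread_start() now does. */
	schedule_delayed_work(&demo_work, 0);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Cancels a pending timer and waits for any running instance. */
	cancel_delayed_work_sync(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The point of the pattern is that cancel_delayed_work_sync() both cancels a pending timer and waits for a running instance, even one that re-queues itself, so the completion/SVC_STOPPING handshake the old kthread needed simply disappears.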