Received: by 2002:a05:6a10:f3d0:0:0:0:0 with SMTP id a16csp1884376pxv; Fri, 2 Jul 2021 15:07:12 -0700 (PDT) X-Google-Smtp-Source: ABdhPJxWNm+yJNzztvamgdtKSG9K1aVXs5OnvbxttZCcfJAwaEoZQ0lQrT0vlaqake1dD/W4vyFx X-Received: by 2002:a17:906:dbec:: with SMTP id yd12mr1905807ejb.102.1625263632427; Fri, 02 Jul 2021 15:07:12 -0700 (PDT) ARC-Seal: i=1; a=rsa-sha256; t=1625263632; cv=none; d=google.com; s=arc-20160816; b=x4LJyPf/uvmcNtaUl15BbMZsMtd4YWx1qc5Htk5Dbom2N98QGGAqH6AEz4EteR9uGB nitJOSuuxNiQc0yYoXcgJ+ej79E6KqvACfCkGdN1RFJTpeWVL6gdmDVmX1v6Fw845p1A rAKSW8u0oP/sfK4IK5mELpaVtzxv6Nb0Jgs00E58qQwRgnj2Bwjszyss3cav01wbUHwA lDFwOYmr4p19mbtWuCO74HofJ30Cp3ECWk+R6bdMJXjQdC6u1ZOJ3ItEXtcgAtfhwRnq Zl4zjlPaXdTLGQjRm7/hNeYFVqRO+KomosBQwUgi7SD17J+C4zOwA6Z0s4wIv19ZnfE0 /MhQ== ARC-Message-Signature: i=1; a=rsa-sha256; c=relaxed/relaxed; d=google.com; s=arc-20160816; h=list-id:precedence:content-transfer-encoding:mime-version :references:in-reply-to:message-id:date:subject:cc:to:from; bh=jJO5fE+nr/w6yJgTYyAUmFC3JCRHa/VoDbjKX41Jznw=; b=VgHXYkabT00r4kIGzYe3h2+6jQaNRtHW38W80XiFvpdSZ24rgZlGa+pgNLEBwv0NqR y1QEctVJHEAypZ9vMjTx6ffGwKuf6sajrP4tyNkwWW19s1r7VqkEtid1fG2vHTTYJweD 59S+vOkaQtgBEcbyhdywklOBGJqcNZOnu2QUnQ3oJ8FgfbcZKC6nqaOvy1lwU2Adf407 hYf65ijRNXhEeYilZF/d+o7+8w6j/jp6EYoLr9+eFIFpHnVYMIfKNZpPP9KGQfxqJU1l hoRV5r16Av+Rz/Q2ODeMRbWO9Ob0YNLfvDeMOlwbbpIGl6lJvA4Wab2bJ5OSjcdV+ZB9 RT1Q== ARC-Authentication-Results: i=1; mx.google.com; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=intel.com Return-Path: Received: from vger.kernel.org (vger.kernel.org. 
[23.128.96.18]) by mx.google.com with ESMTP id dn15si4119646ejc.198.2021.07.02.15.06.45; Fri, 02 Jul 2021 15:07:12 -0700 (PDT) Received-SPF: pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) client-ip=23.128.96.18; Authentication-Results: mx.google.com; spf=pass (google.com: domain of linux-kernel-owner@vger.kernel.org designates 23.128.96.18 as permitted sender) smtp.mailfrom=linux-kernel-owner@vger.kernel.org; dmarc=fail (p=NONE sp=NONE dis=NONE) header.from=intel.com Received: (majordomo@vger.kernel.org) by vger.kernel.org via listexpand id S233240AbhGBWIE (ORCPT + 99 others); Fri, 2 Jul 2021 18:08:04 -0400 Received: from mga02.intel.com ([134.134.136.20]:51169 "EHLO mga02.intel.com" rhost-flags-OK-OK-OK-OK) by vger.kernel.org with ESMTP id S233006AbhGBWHy (ORCPT ); Fri, 2 Jul 2021 18:07:54 -0400 X-IronPort-AV: E=McAfee;i="6200,9189,10033"; a="195951889" X-IronPort-AV: E=Sophos;i="5.83,320,1616482800"; d="scan'208";a="195951889" Received: from fmsmga006.fm.intel.com ([10.253.24.20]) by orsmga101.jf.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 02 Jul 2021 15:05:20 -0700 X-IronPort-AV: E=Sophos;i="5.83,320,1616482800"; d="scan'208";a="642814696" Received: from ls.sc.intel.com (HELO localhost) ([143.183.96.54]) by fmsmga006-auth.fm.intel.com with ESMTP/TLS/ECDHE-RSA-AES256-GCM-SHA384; 02 Jul 2021 15:05:20 -0700 From: isaku.yamahata@intel.com To: Thomas Gleixner , Ingo Molnar , Borislav Petkov , "H . 
Peter Anvin" , Paolo Bonzini , Vitaly Kuznetsov , Wanpeng Li , Jim Mattson , Joerg Roedel , erdemaktas@google.com, Connor Kuehl , Sean Christopherson , x86@kernel.org, linux-kernel@vger.kernel.org, kvm@vger.kernel.org Cc: isaku.yamahata@intel.com, isaku.yamahata@gmail.com, Sean Christopherson , Kai Huang , Xiaoyao Li Subject: [RFC PATCH v2 09/69] KVM: TDX: Add C wrapper functions for TDX SEAMCALLs Date: Fri, 2 Jul 2021 15:04:15 -0700 Message-Id: <96e4e50feee62f476f2dcf170d20f9267c7d7d6a.1625186503.git.isaku.yamahata@intel.com> X-Mailer: git-send-email 2.25.1 In-Reply-To: References: MIME-Version: 1.0 Content-Transfer-Encoding: 8bit Precedence: bulk List-ID: X-Mailing-List: linux-kernel@vger.kernel.org From: Sean Christopherson TDX SEAMCALL interface is defined in [1] 20.2 Host-Side(SEAMCALL) interface Functions. Define C wrapper functions for SEAMCALLs which the later patches will use. [1] TDX Module spec https://software.intel.com/content/dam/develop/external/us/en/documents/tdx-module-1eas-v0.85.039.pdf Co-developed-by: Kai Huang Signed-off-by: Kai Huang Co-developed-by: Xiaoyao Li Signed-off-by: Xiaoyao Li Signed-off-by: Sean Christopherson Signed-off-by: Isaku Yamahata --- arch/x86/kvm/vmx/tdx_ops.h | 205 +++++++++++++++++++++++++++++++++++++ 1 file changed, 205 insertions(+) create mode 100644 arch/x86/kvm/vmx/tdx_ops.h diff --git a/arch/x86/kvm/vmx/tdx_ops.h b/arch/x86/kvm/vmx/tdx_ops.h new file mode 100644 index 000000000000..8afcffa267dc --- /dev/null +++ b/arch/x86/kvm/vmx/tdx_ops.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __KVM_X86_TDX_OPS_H +#define __KVM_X86_TDX_OPS_H + +#include + +#include +#include + +#include "seamcall.h" + +static inline u64 tdh_mng_addcx(hpa_t tdr, hpa_t addr) +{ + return seamcall(TDH_MNG_ADDCX, addr, tdr, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_page_add(hpa_t tdr, gpa_t gpa, hpa_t hpa, hpa_t source, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_PAGE_ADD, gpa, tdr, hpa, source, 0, ex); +} 
+ +static inline u64 tdh_mem_sept_add(hpa_t tdr, gpa_t gpa, int level, hpa_t page, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_SEPT_ADD, gpa | level, tdr, page, 0, 0, ex); +} + +static inline u64 tdh_vp_addcx(hpa_t tdvpr, hpa_t addr) +{ + return seamcall(TDH_VP_ADDCX, addr, tdvpr, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_page_aug(hpa_t tdr, gpa_t gpa, hpa_t hpa, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_PAGE_AUG, gpa, tdr, hpa, 0, 0, ex); +} + +static inline u64 tdh_mem_range_block(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_RANGE_BLOCK, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_mng_key_config(hpa_t tdr) +{ + return seamcall(TDH_MNG_KEY_CONFIG, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mng_create(hpa_t tdr, int hkid) +{ + return seamcall(TDH_MNG_CREATE, tdr, hkid, 0, 0, 0, NULL); +} + +static inline u64 tdh_vp_create(hpa_t tdr, hpa_t tdvpr) +{ + return seamcall(TDH_VP_CREATE, tdvpr, tdr, 0, 0, 0, NULL); +} + +static inline u64 tdh_mng_rd(hpa_t tdr, u64 field, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MNG_RD, tdr, field, 0, 0, 0, ex); +} + +static inline u64 tdh_mng_wr(hpa_t tdr, u64 field, u64 val, u64 mask, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MNG_WR, tdr, field, val, mask, 0, ex); +} + +static inline u64 tdh_phymem_page_rd(hpa_t addr, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_PHYMEM_PAGE_RD, addr, 0, 0, 0, 0, ex); +} + +static inline u64 tdh_phymem_page_wr(hpa_t addr, u64 val, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_PHYMEM_PAGE_WR, addr, val, 0, 0, 0, ex); +} + +static inline u64 tdh_mem_page_demote(hpa_t tdr, gpa_t gpa, int level, hpa_t page, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_PAGE_DEMOTE, gpa | level, tdr, page, 0, 0, ex); +} + +static inline u64 tdh_mr_extend(hpa_t tdr, gpa_t gpa, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MR_EXTEND, gpa, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_mr_finalize(hpa_t 
tdr) +{ + return seamcall(TDH_MR_FINALIZE, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_vp_flush(hpa_t tdvpr) +{ + return seamcall(TDH_VP_FLUSH, tdvpr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mng_vpflushdone(hpa_t tdr) +{ + return seamcall(TDH_MNG_VPFLUSHDONE, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mng_key_freeid(hpa_t tdr) +{ + return seamcall(TDH_MNG_KEY_FREEID, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mng_init(hpa_t tdr, hpa_t td_params, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MNG_INIT, tdr, td_params, 0, 0, 0, ex); +} + +static inline u64 tdh_vp_init(hpa_t tdvpr, u64 rcx) +{ + return seamcall(TDH_VP_INIT, tdvpr, rcx, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_page_promote(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_PAGE_PROMOTE, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_phymem_page_rdmd(hpa_t page, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_PHYMEM_PAGE_RDMD, page, 0, 0, 0, 0, ex); +} + +static inline u64 tdh_mem_sept_rd(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_SEPT_RD, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_vp_rd(hpa_t tdvpr, u64 field, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_VP_RD, tdvpr, field, 0, 0, 0, ex); +} + +static inline u64 tdh_mng_key_reclaimid(hpa_t tdr) +{ + return seamcall(TDH_MNG_KEY_RECLAIMID, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_phymem_page_reclaim(hpa_t page, struct tdx_ex_ret *ex) +{ + return seamcall(TDH_PHYMEM_PAGE_RECLAIM, page, 0, 0, 0, 0, ex); +} + +static inline u64 tdh_mem_page_remove(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_PAGE_REMOVE, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_mem_sept_remove(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_SEPT_REMOVE, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 
tdh_sys_lp_shutdown(void) +{ + return seamcall(TDH_SYS_LP_SHUTDOWN, 0, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_track(hpa_t tdr) +{ + return seamcall(TDH_MEM_TRACK, tdr, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_range_unblock(hpa_t tdr, gpa_t gpa, int level, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_RANGE_UNBLOCK, gpa | level, tdr, 0, 0, 0, ex); +} + +static inline u64 tdh_phymem_cache_wb(bool resume) +{ + return seamcall(TDH_PHYMEM_CACHE_WB, resume ? 1 : 0, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_phymem_page_wbinvd(hpa_t page) +{ + return seamcall(TDH_PHYMEM_PAGE_WBINVD, page, 0, 0, 0, 0, NULL); +} + +static inline u64 tdh_mem_sept_wr(hpa_t tdr, gpa_t gpa, int level, u64 val, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_MEM_SEPT_WR, gpa | level, tdr, val, 0, 0, ex); +} + +static inline u64 tdh_vp_wr(hpa_t tdvpr, u64 field, u64 val, u64 mask, + struct tdx_ex_ret *ex) +{ + return seamcall(TDH_VP_WR, tdvpr, field, val, mask, 0, ex); +} + +#endif /* __KVM_X86_TDX_OPS_H */ -- 2.25.1