Andy reported that if a file page gets reclaimed we lose the soft-dirty bit
if it was there, so save the _PAGE_BIT_SOFT_DIRTY bit when the page address
gets encoded into the pte entry. Thus when a #pf happens on such a non-present
pte we can restore it back.
Reported-by: Andy Lutomirski <[email protected]>
Signed-off-by: Cyrill Gorcunov <[email protected]>
Cc: Pavel Emelyanov <[email protected]>
Cc: Andrew Morton <[email protected]>
Cc: Matt Mackall <[email protected]>
Cc: Xiao Guangrong <[email protected]>
Cc: Marcelo Tosatti <[email protected]>
Cc: KOSAKI Motohiro <[email protected]>
Cc: Stephen Rothwell <[email protected]>
---
arch/x86/include/asm/pgtable-2level.h | 48 +++++++++++++++++++++++++++++++++-
arch/x86/include/asm/pgtable-3level.h | 3 ++
arch/x86/include/asm/pgtable.h | 15 ++++++++++
arch/x86/include/asm/pgtable_types.h | 4 ++
fs/proc/task_mmu.c | 2 +
mm/fremap.c | 14 +++++++--
mm/memory.c | 20 +++++++++++---
mm/rmap.c | 10 +++++--
8 files changed, 105 insertions(+), 11 deletions(-)
Index: linux-2.6.git/arch/x86/include/asm/pgtable-2level.h
===================================================================
--- linux-2.6.git.orig/arch/x86/include/asm/pgtable-2level.h
+++ linux-2.6.git/arch/x86/include/asm/pgtable-2level.h
@@ -55,9 +55,53 @@ static inline pmd_t native_pmdp_get_and_
#define native_pmdp_get_and_clear(xp) native_local_pmdp_get_and_clear(xp)
#endif
+#ifdef CONFIG_MEM_SOFT_DIRTY
+
+/*
+ * Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE, _PAGE_BIT_SOFT_DIRTY and
+ * _PAGE_BIT_PROTNONE are taken, split up the 28 bits of offset
+ * into this range.
+ */
+#define PTE_FILE_MAX_BITS 28
+#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
+#define PTE_FILE_SHIFT2 (_PAGE_BIT_FILE + 1)
+#define PTE_FILE_SHIFT3 (_PAGE_BIT_PROTNONE + 1)
+#define PTE_FILE_SHIFT4 (_PAGE_BIT_SOFT_DIRTY + 1)
+#define PTE_FILE_BITS1 (PTE_FILE_SHIFT2 - PTE_FILE_SHIFT1 - 1)
+#define PTE_FILE_BITS2 (PTE_FILE_SHIFT3 - PTE_FILE_SHIFT2 - 1)
+#define PTE_FILE_BITS3 (PTE_FILE_SHIFT4 - PTE_FILE_SHIFT3 - 1)
+
+#define pte_to_pgoff(pte) \
+ ((((pte).pte_low >> (PTE_FILE_SHIFT1)) \
+ & ((1U << PTE_FILE_BITS1) - 1))) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT2)) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << (PTE_FILE_BITS1)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT3)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ + ((((pte).pte_low >> (PTE_FILE_SHIFT4))) \
+ << (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))
+
+#define pgoff_to_pte(off) \
+ ((pte_t) { .pte_low = \
+ ((((off)) & ((1U << PTE_FILE_BITS1) - 1)) << PTE_FILE_SHIFT1) \
+ + ((((off) >> PTE_FILE_BITS1) \
+ & ((1U << PTE_FILE_BITS2) - 1)) \
+ << PTE_FILE_SHIFT2) \
+ + ((((off) >> (PTE_FILE_BITS1 + PTE_FILE_BITS2)) \
+ & ((1U << PTE_FILE_BITS3) - 1)) \
+ << PTE_FILE_SHIFT3) \
+ + ((((off) >> \
+ (PTE_FILE_BITS1 + PTE_FILE_BITS2 + PTE_FILE_BITS3))) \
+ << PTE_FILE_SHIFT4) \
+ + _PAGE_FILE })
+
+#else /* CONFIG_MEM_SOFT_DIRTY */
+
/*
* Bits _PAGE_BIT_PRESENT, _PAGE_BIT_FILE and _PAGE_BIT_PROTNONE are taken,
- * split up the 29 bits of offset into this range:
+ * split up the 29 bits of offset into this range.
*/
#define PTE_FILE_MAX_BITS 29
#define PTE_FILE_SHIFT1 (_PAGE_BIT_PRESENT + 1)
@@ -88,6 +132,8 @@ static inline pmd_t native_pmdp_get_and_
<< PTE_FILE_SHIFT3) \
+ _PAGE_FILE })
+#endif /* CONFIG_MEM_SOFT_DIRTY */
+
/* Encode and de-code a swap entry */
#if _PAGE_BIT_FILE < _PAGE_BIT_PROTNONE
#define SWP_TYPE_BITS (_PAGE_BIT_FILE - _PAGE_BIT_PRESENT - 1)
Index: linux-2.6.git/arch/x86/include/asm/pgtable-3level.h
===================================================================
--- linux-2.6.git.orig/arch/x86/include/asm/pgtable-3level.h
+++ linux-2.6.git/arch/x86/include/asm/pgtable-3level.h
@@ -179,6 +179,9 @@ static inline pmd_t native_pmdp_get_and_
/*
* Bits 0, 6 and 7 are taken in the low part of the pte,
* put the 32 bits of offset into the high part.
+ *
+ * For soft-dirty tracking, bit 11 is taken from
+ * the low part of the pte as well.
*/
#define pte_to_pgoff(pte) ((pte).pte_high)
#define pgoff_to_pte(off) \
Index: linux-2.6.git/arch/x86/include/asm/pgtable.h
===================================================================
--- linux-2.6.git.orig/arch/x86/include/asm/pgtable.h
+++ linux-2.6.git/arch/x86/include/asm/pgtable.h
@@ -329,6 +329,21 @@ static inline pte_t pte_swp_clear_soft_d
return pte_clear_flags(pte, _PAGE_SWP_SOFT_DIRTY);
}
+static inline pte_t pte_file_clear_soft_dirty(pte_t pte)
+{
+ return pte_clear_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline pte_t pte_file_mksoft_dirty(pte_t pte)
+{
+ return pte_set_flags(pte, _PAGE_SOFT_DIRTY);
+}
+
+static inline int pte_file_soft_dirty(pte_t pte)
+{
+ return pte_flags(pte) & _PAGE_SOFT_DIRTY;
+}
+
/*
* Mask out unsupported bits in a present pgprot. Non-present pgprots
* can use those bits for other purposes, so leave them be.
Index: linux-2.6.git/arch/x86/include/asm/pgtable_types.h
===================================================================
--- linux-2.6.git.orig/arch/x86/include/asm/pgtable_types.h
+++ linux-2.6.git/arch/x86/include/asm/pgtable_types.h
@@ -61,8 +61,10 @@
* they do not conflict with each other.
*/
+#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_HIDDEN
+
#ifdef CONFIG_MEM_SOFT_DIRTY
-#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
+#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
Index: linux-2.6.git/fs/proc/task_mmu.c
===================================================================
--- linux-2.6.git.orig/fs/proc/task_mmu.c
+++ linux-2.6.git/fs/proc/task_mmu.c
@@ -736,6 +736,8 @@ static inline void clear_soft_dirty(stru
ptent = pte_clear_flags(ptent, _PAGE_SOFT_DIRTY);
} else if (pte_swp_soft_dirty(ptent)) {
ptent = pte_swp_clear_soft_dirty(ptent);
+ } else if (pte_file(ptent)) {
+ ptent = pte_file_clear_soft_dirty(ptent);
}
set_pte_at(vma->vm_mm, addr, pte, ptent);
Index: linux-2.6.git/mm/fremap.c
===================================================================
--- linux-2.6.git.orig/mm/fremap.c
+++ linux-2.6.git/mm/fremap.c
@@ -57,17 +57,25 @@ static int install_file_pte(struct mm_st
unsigned long addr, unsigned long pgoff, pgprot_t prot)
{
int err = -ENOMEM;
- pte_t *pte;
+ pte_t *pte, ptfile;
spinlock_t *ptl;
pte = get_locked_pte(mm, addr, &ptl);
if (!pte)
goto out;
- if (!pte_none(*pte))
+ ptfile = pgoff_to_pte(pgoff);
+
+ if (!pte_none(*pte)) {
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ if (pte_present(*pte) &&
+ pte_soft_dirty(*pte))
+ pte_file_mksoft_dirty(ptfile);
+#endif
zap_pte(mm, vma, addr, pte);
+ }
- set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
+ set_pte_at(mm, addr, pte, ptfile);
/*
* We don't need to run update_mmu_cache() here because the "file pte"
* being installed by install_file_pte() is not a real pte - it's a
Index: linux-2.6.git/mm/memory.c
===================================================================
--- linux-2.6.git.orig/mm/memory.c
+++ linux-2.6.git/mm/memory.c
@@ -1141,9 +1141,14 @@ again:
continue;
if (unlikely(details) && details->nonlinear_vma
&& linear_page_index(details->nonlinear_vma,
- addr) != page->index)
- set_pte_at(mm, addr, pte,
- pgoff_to_pte(page->index));
+ addr) != page->index) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ if (pte_soft_dirty(ptent))
+ pte_file_mksoft_dirty(ptfile);
+#endif
+ set_pte_at(mm, addr, pte, ptfile);
+ }
if (PageAnon(page))
rss[MM_ANONPAGES]--;
else {
@@ -3410,8 +3415,15 @@ static int __do_fault(struct mm_struct *
if (likely(pte_same(*page_table, orig_pte))) {
flush_icache_page(vma, page);
entry = mk_pte(page, vma->vm_page_prot);
- if (flags & FAULT_FLAG_WRITE)
+ if (flags & FAULT_FLAG_WRITE) {
entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ }
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ else if (pte_file(orig_pte) &&
+ pte_file_soft_dirty(orig_pte)) {
+ pte_mksoft_dirty(entry);
+ }
+#endif
if (anon) {
inc_mm_counter_fast(mm, MM_ANONPAGES);
page_add_new_anon_rmap(page, vma, address);
Index: linux-2.6.git/mm/rmap.c
===================================================================
--- linux-2.6.git.orig/mm/rmap.c
+++ linux-2.6.git/mm/rmap.c
@@ -1407,8 +1407,14 @@ static int try_to_unmap_cluster(unsigned
pteval = ptep_clear_flush(vma, address, pte);
/* If nonlinear, store the file page offset in the pte. */
- if (page->index != linear_page_index(vma, address))
- set_pte_at(mm, address, pte, pgoff_to_pte(page->index));
+ if (page->index != linear_page_index(vma, address)) {
+ pte_t ptfile = pgoff_to_pte(page->index);
+#ifdef CONFIG_MEM_SOFT_DIRTY
+ if (pte_soft_dirty(pteval))
+ pte_file_mksoft_dirty(ptfile);
+#endif
+ set_pte_at(mm, address, pte, ptfile);
+ }
/* Move the dirty bit to the physical page now the pte is gone. */
if (pte_dirty(pteval))
On Fri, Jul 26, 2013 at 1:18 PM, Cyrill Gorcunov <[email protected]> wrote:
> Andy reported that if file page get reclaimed we loose soft-dirty bit
> if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
> get encoded into pte entry. Thus when #pf happens on such non-present
> pte we can restore it back.
>
Unless I'm misunderstanding this, it's saving the bit in the
non-present PTE. This sounds wrong -- what happens if the entire pmd
(or whatever the next level is called) gets zapped? (Also, what
happens if you unmap a file and map a different file there?)
--Andy
On Fri, Jul 26, 2013 at 01:55:04PM -0700, Andy Lutomirski wrote:
> On Fri, Jul 26, 2013 at 1:18 PM, Cyrill Gorcunov <[email protected]> wrote:
> > Andy reported that if file page get reclaimed we loose soft-dirty bit
> > if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
> > get encoded into pte entry. Thus when #pf happens on such non-present
> > pte we can restore it back.
> >
>
> Unless I'm misunderstanding this, it's saving the bit in the
> non-present PTE. This sounds wrong -- what happens if the entire pmd
It's the same as encoding pgoff in the pte entry (the pte is not present),
but together with pgoff we save the soft-bit status; later, on #pf, we decode
pgoff and restore the soft bit if it was there. The pte itself can't disappear
since it holds the pgoff information.
> (or whatever the next level is called) gets zapped? (Also, what
> happens if you unmap a file and map a different file there?)
If file pages are remapped to a new place, we remember the soft-dirty
bit status they previously had and propagate it to the new pte (as in
install_file_pte, old ptes are cleared).
If the file is unmapped and then a new one is mapped, the pmd/ptes are
cleared (including the soft bit) and they remain clear until a new write
happens, if only I've not missed something obvious.
On Fri, Jul 26, 2013 at 2:18 PM, Cyrill Gorcunov <[email protected]> wrote:
> On Fri, Jul 26, 2013 at 01:55:04PM -0700, Andy Lutomirski wrote:
>> On Fri, Jul 26, 2013 at 1:18 PM, Cyrill Gorcunov <[email protected]> wrote:
>> > Andy reported that if file page get reclaimed we loose soft-dirty bit
>> > if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
>> > get encoded into pte entry. Thus when #pf happens on such non-present
>> > pte we can restore it back.
>> >
>>
>> Unless I'm misunderstanding this, it's saving the bit in the
>> non-present PTE. This sounds wrong -- what happens if the entire pmd
>
> It's the same as encoding pgoff in pte entry (pte is not present),
> but together with pgoff we save soft-bit status, later on #pf we decode
> pgoff and restore softbit back if it was there, pte itself can't disappear
> since it holds pgoff information.
Isn't that only the case for nonlinear mappings?
--Andy
On Fri, Jul 26, 2013 at 02:36:51PM -0700, Andy Lutomirski wrote:
> >> Unless I'm misunderstanding this, it's saving the bit in the
> >> non-present PTE. This sounds wrong -- what happens if the entire pmd
> >
> > It's the same as encoding pgoff in pte entry (pte is not present),
> > but together with pgoff we save soft-bit status, later on #pf we decode
> > pgoff and restore softbit back if it was there, pte itself can't disappear
> > since it holds pgoff information.
>
> Isn't that only the case for nonlinear mappings?
Andy, I'm somehow lost: a pte either exists with the file offset encoded,
or it doesn't. When the pud/ptes are zapped, any access to them should cause
a #pf, pointing the kernel to read/write data from the file into a page;
if it happens on a write, the pte obtains the dirty bit (which is always
set together with the soft bit).
On Fri, Jul 26, 2013 at 11:25 PM, Cyrill Gorcunov <[email protected]> wrote:
> On Fri, Jul 26, 2013 at 02:36:51PM -0700, Andy Lutomirski wrote:
>> >> Unless I'm misunderstanding this, it's saving the bit in the
>> >> non-present PTE. This sounds wrong -- what happens if the entire pmd
>> >
>> > It's the same as encoding pgoff in pte entry (pte is not present),
>> > but together with pgoff we save soft-bit status, later on #pf we decode
>> > pgoff and restore softbit back if it was there, pte itself can't disappear
>> > since it holds pgoff information.
>>
>> Isn't that only the case for nonlinear mappings?
>
> Andy, I'm somehow lost, pte either exist with file encoded, either not,
> when pud/ptes are zapped and any access to it should cause #pf pointing
> kernel to read/write data from file to a page, if it happens on write
> the pte is obtaining dirty bit (which always set together with soft
> bit).
Hmm. I may have been wrong.
By my reading of this stuff, when a pte is freed to reclaim memory, if
it's an un-cowed file mapping, it's cleared completely by
zap_pte_range -- no swap entry is left behind. That's this code in
zap_pte_range:
/*
* unmap_shared_mapping_pages() wants to
* invalidate cache without truncating:
* unmap shared but keep private pages.
*/
if (details->check_mapping &&
details->check_mapping != page->mapping)
continue;
In theory, if you map 2MB (on x86_64) of a file as MAP_PRIVATE,
aligned, then you get a whole pmd. If you don't write any of it
(triggering COW), the kernel could, in theory, free all those ptes, so
you can't save any state in there. (I can't find any code that does
this, though.)
That being said, a MAP_PRIVATE, un-cowed mapping must be clean -- if
it had been (soft-)dirtied, it would also have been cowed. So you
might be okay.
--Andy
On 07/27/2013 12:55 AM, Andy Lutomirski wrote:
> On Fri, Jul 26, 2013 at 1:18 PM, Cyrill Gorcunov <[email protected]> wrote:
>> Andy reported that if file page get reclaimed we loose soft-dirty bit
>> if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
>> get encoded into pte entry. Thus when #pf happens on such non-present
>> pte we can restore it back.
>>
>
> Unless I'm misunderstanding this, it's saving the bit in the
> non-present PTE. This sounds wrong -- what happens if the entire pmd
> (or whatever the next level is called) gets zapped? (Also, what
> happens if you unmap a file and map a different file there?)
The whole pte gets zapped on vma unmap, and in this case forgetting
the soft-dirty bit completely is OK.
> --Andy
> .
>
On Sat, Jul 27, 2013 at 10:06:01AM -0700, Andy Lutomirski wrote:
>
> That being said, a MAP_PRIVATE, un-cowed mapping must be clean -- if
> it had been (soft-)dirtied, it would also have been cowed. So you
> might be okay.
Yes, as far as I know we are either cow'ed or in a clean state; thus
either the soft bit is set on #pf (and when reclaimed it rests in the file pte),
or it remains clean and there is no change we need to track.
On 07/27/2013 11:29 PM, Pavel Emelyanov wrote:
> On 07/27/2013 12:55 AM, Andy Lutomirski wrote:
>> On Fri, Jul 26, 2013 at 1:18 PM, Cyrill Gorcunov <[email protected]> wrote:
>>> Andy reported that if file page get reclaimed we loose soft-dirty bit
>>> if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
>>> get encoded into pte entry. Thus when #pf happens on such non-present
>>> pte we can restore it back.
>>>
>>
>> Unless I'm misunderstanding this, it's saving the bit in the
>> non-present PTE. This sounds wrong -- what happens if the entire pmd
>> (or whatever the next level is called) gets zapped? (Also, what
>> happens if you unmap a file and map a different file there?)
>
> The whole pte gets zapped on vma unmap, and in this case forgetting
> the soft-dirty bit completely is OK.
I mean -- soft-dirty bits denote changes in the vm area; if you remove
one, then it can be found out from the /proc/pid/maps file that the
vma has disappeared.
But one problem really went unnoticed here -- if we map a new vma in
place of some old one with the same flags and prots. It looks like we
need a vma soft-dirty mark that is set on mmap and mremap, is cleared
on soft-dirty clear, and is propagated into the pte pagemap bits.
>> --Andy
Thanks,
Pavel
On 07/27/2013 12:18 AM, Cyrill Gorcunov wrote:
> Andy reported that if file page get reclaimed we loose soft-dirty bit
> if it was there, so save _PAGE_BIT_SOFT_DIRTY bit when page address
> get encoded into pte entry. Thus when #pf happens on such non-present
> pte we can restore it back.
>
> Reported-by: Andy Lutomirski <[email protected]>
> Signed-off-by: Cyrill Gorcunov <[email protected]>
> Cc: Pavel Emelyanov <[email protected]>
> Cc: Andrew Morton <[email protected]>
> Cc: Matt Mackall <[email protected]>
> Cc: Xiao Guangrong <[email protected]>
> Cc: Marcelo Tosatti <[email protected]>
> Cc: KOSAKI Motohiro <[email protected]>
> Cc: Stephen Rothwell <[email protected]>
> ---
> @@ -57,17 +57,25 @@ static int install_file_pte(struct mm_st
> unsigned long addr, unsigned long pgoff, pgprot_t prot)
> {
> int err = -ENOMEM;
> - pte_t *pte;
> + pte_t *pte, ptfile;
> spinlock_t *ptl;
>
> pte = get_locked_pte(mm, addr, &ptl);
> if (!pte)
> goto out;
>
> - if (!pte_none(*pte))
> + ptfile = pgoff_to_pte(pgoff);
> +
> + if (!pte_none(*pte)) {
> +#ifdef CONFIG_MEM_SOFT_DIRTY
> + if (pte_present(*pte) &&
> + pte_soft_dirty(*pte))
I think there's no need to wrap every such if () inside #ifdef CONFIG_...,
since the pte_soft_dirty() routine itself would be 0 for the non-soft-dirty
case and the compiler would optimize this code out.
> + pte_file_mksoft_dirty(ptfile);
> +#endif
> zap_pte(mm, vma, addr, pte);
> + }
>
> - set_pte_at(mm, addr, pte, pgoff_to_pte(pgoff));
> + set_pte_at(mm, addr, pte, ptfile);
> /*
> * We don't need to run update_mmu_cache() here because the "file pte"
> * being installed by install_file_pte() is not a real pte - it's a
On Mon, Jul 29, 2013 at 06:08:55PM +0400, Pavel Emelyanov wrote:
> >
> > - if (!pte_none(*pte))
> > + ptfile = pgoff_to_pte(pgoff);
> > +
> > + if (!pte_none(*pte)) {
> > +#ifdef CONFIG_MEM_SOFT_DIRTY
> > + if (pte_present(*pte) &&
> > + pte_soft_dirty(*pte))
>
> I think there's no need in wrapping every such if () inside #ifdef CONFIG_...,
> since the pte_soft_dirty() routine itself would be 0 for non-soft-dirty case
> and compiler would optimize this code out.
Unless I'm missing something obvious, this code compiles not only on x86;
CONFIG_MEM_SOFT_DIRTY depends on x86 (otherwise I'd have to implement
pte_soft_dirty for all archs).
On 07/29/2013 06:14 PM, Cyrill Gorcunov wrote:
> On Mon, Jul 29, 2013 at 06:08:55PM +0400, Pavel Emelyanov wrote:
>>>
>>> - if (!pte_none(*pte))
>>> + ptfile = pgoff_to_pte(pgoff);
>>> +
>>> + if (!pte_none(*pte)) {
>>> +#ifdef CONFIG_MEM_SOFT_DIRTY
>>> + if (pte_present(*pte) &&
>>> + pte_soft_dirty(*pte))
>>
>> I think there's no need in wrapping every such if () inside #ifdef CONFIG_...,
>> since the pte_soft_dirty() routine itself would be 0 for non-soft-dirty case
>> and compiler would optimize this code out.
>
> If only I'm not missing something obvious, this code compiles not only on x86,
> CONFIG_MEM_SOFT_DIRTY depends on x86 (otherwise I'll have to implement
> pte_soft_dirty for all archs).
For non-x86 case there are stubs in include/asm-generic/pgtable.h that would
act as if the CONFIG_MEM_SOFT_DIRTY is off.
Thanks,
Pavel
On Mon, Jul 29, 2013 at 06:24:39PM +0400, Pavel Emelyanov wrote:
>
> For non-x86 case there are stubs in include/asm-generic/pgtable.h that would
> act as if the CONFIG_MEM_SOFT_DIRTY is off.
Yeah, thanks, I'll update.
Cyrill Gorcunov <[email protected]> writes:
> On Mon, Jul 29, 2013 at 06:08:55PM +0400, Pavel Emelyanov wrote:
>> >
>> > - if (!pte_none(*pte))
>> > + ptfile = pgoff_to_pte(pgoff);
>> > +
>> > + if (!pte_none(*pte)) {
>> > +#ifdef CONFIG_MEM_SOFT_DIRTY
>> > + if (pte_present(*pte) &&
>> > + pte_soft_dirty(*pte))
>>
>> I think there's no need in wrapping every such if () inside #ifdef CONFIG_...,
>> since the pte_soft_dirty() routine itself would be 0 for non-soft-dirty case
>> and compiler would optimize this code out.
>
> If only I'm not missing something obvious, this code compiles not only on x86,
> CONFIG_MEM_SOFT_DIRTY depends on x86 (otherwise I'll have to implement
> pte_soft_dirty for all archs).
why not
#ifndef pte_soft_dirty
#define pte_soft_dirty(pte) 0
#endif
and on x86
#define pte_soft_dirty pte_soft_dirty
-aneesh
On Mon, Jul 29, 2013 at 08:28:54PM +0530, Aneesh Kumar K.V wrote:
> >
> > If only I'm not missing something obvious, this code compiles not only on x86,
> > CONFIG_MEM_SOFT_DIRTY depends on x86 (otherwise I'll have to implement
> > pte_soft_dirty for all archs).
>
> why not
>
> #ifndef pte_soft_dirty
> #define pte_soft_dirty(pte) 0
> #endif
>
> and on x86
> #define pte_soft_dirty pte_soft_dirty
Yeah, either this way, or use asm-generic (the latter I think is
more preferred), thanks!