Currently, there is a race window between the point when the mutex is
unlocked in fastrpc_map_lookup() and the point when the reference count
is increased (in fastrpc_map_get()) by fastrpc_map_find(), which can
also lead to a use-after-free.
So let's merge fastrpc_map_find() into fastrpc_map_lookup(), which
allows us to protect both the maps list and the reference count with the
fl->lock spinlock, since the lock is now released only after the
reference has been taken. Add a take_ref argument so the new function
suits all callers.
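For illustration, the problematic interleaving in the old code looks
roughly like this (a sketch, not verbatim driver code):

    CPU0 (fastrpc_map_find)           CPU1 (fastrpc_map_put)

    fastrpc_map_lookup()
      mutex_unlock(&fl->mutex)
                                      kref_put(&map->refcount, ...)
                                        fastrpc_free_map(map)
    fastrpc_map_get(map)
      /* use-after-free */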
Fixes: 8f6c1d8c4f0c ("misc: fastrpc: Add fdlist implementation")
Co-developed-by: Ola Jeppsson <[email protected]>
Signed-off-by: Ola Jeppsson <[email protected]>
Signed-off-by: Abel Vesa <[email protected]>
---
drivers/misc/fastrpc.c | 41 +++++++++++++++++++++--------------------
1 file changed, 21 insertions(+), 20 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 93ebd174d848..0c816a11eeec 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -333,30 +333,31 @@ static void fastrpc_map_get(struct fastrpc_map *map)
static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap)
+ struct fastrpc_map **ppmap, bool take_ref)
{
+ struct fastrpc_session_ctx *sess = fl->sctx;
struct fastrpc_map *map = NULL;
+ int ret = -ENOENT;
- mutex_lock(&fl->mutex);
+ spin_lock(&fl->lock);
list_for_each_entry(map, &fl->maps, node) {
- if (map->fd == fd) {
- *ppmap = map;
- mutex_unlock(&fl->mutex);
- return 0;
- }
- }
- mutex_unlock(&fl->mutex);
-
- return -ENOENT;
-}
+ if (map->fd != fd)
+ continue;
-static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
- struct fastrpc_map **ppmap)
-{
- int ret = fastrpc_map_lookup(fl, fd, ppmap);
+ if (take_ref) {
+ ret = fastrpc_map_get(map);
+ if (ret) {
+ dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
+ __func__, fd, ret);
+ break;
+ }
+ }
- if (!ret)
- fastrpc_map_get(*ppmap);
+ *ppmap = map;
+ ret = 0;
+ break;
+ }
+ spin_unlock(&fl->lock);
return ret;
}
@@ -703,7 +704,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
struct fastrpc_map *map = NULL;
int err = 0;
- if (!fastrpc_map_find(fl, fd, ppmap))
+ if (!fastrpc_map_lookup(fl, fd, ppmap, true))
return 0;
map = kzalloc(sizeof(*map), GFP_KERNEL);
@@ -1026,7 +1027,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
if (!fdlist[i])
break;
- if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
+ if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
fastrpc_map_put(mmap);
}
--
2.34.1
From: Ola Jeppsson <[email protected]>
It is possible that, between the reference count dropping to zero (on
the final fastrpc_map_put()) and map->fl->lock being taken in
fastrpc_free_map(), another thread can call fastrpc_map_lookup() and get
a reference to a map that is about to be deleted.
Rewrite fastrpc_map_get() to only increase the reference count of a map
if it's non-zero. Propagate this to callers so they can know if a map is
about to be deleted.
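Roughly, the race looks like this (a sketch for illustration):

    CPU0 (fastrpc_map_put)            CPU1 (fastrpc_map_lookup)

    kref_put() drops refcount to 0
    fastrpc_free_map() is entered
                                      finds map still on fl->maps
                                      fastrpc_map_get(map)
                                        /* kref_get() on refcount 0 */
    spin_lock(&map->fl->lock)
    list_del(&map->node)
    spin_unlock(&map->fl->lock)
    kfree(map)

With kref_get_unless_zero(), the lookup side instead sees the zero
refcount and returns -ENOENT rather than resurrecting a dying map.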
Fixes this warning:
refcount_t: addition on 0; use-after-free.
WARNING: CPU: 5 PID: 10100 at lib/refcount.c:25 refcount_warn_saturate
...
Call trace:
refcount_warn_saturate
[fastrpc_map_get inlined]
[fastrpc_map_lookup inlined]
fastrpc_map_create
fastrpc_internal_invoke
fastrpc_device_ioctl
__arm64_sys_ioctl
invoke_syscall
Fixes: c68cfb718c8f9 ("misc: fastrpc: Add support for context Invoke method")
Signed-off-by: Ola Jeppsson <[email protected]>
Signed-off-by: Abel Vesa <[email protected]>
---
drivers/misc/fastrpc.c | 8 +++++---
1 file changed, 5 insertions(+), 3 deletions(-)
diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
index 50c17f5da3a8..58654d394d17 100644
--- a/drivers/misc/fastrpc.c
+++ b/drivers/misc/fastrpc.c
@@ -332,10 +332,12 @@ static void fastrpc_map_put(struct fastrpc_map *map)
kref_put(&map->refcount, fastrpc_free_map);
}
-static void fastrpc_map_get(struct fastrpc_map *map)
+static int fastrpc_map_get(struct fastrpc_map *map)
{
- if (map)
- kref_get(&map->refcount);
+ if (!map)
+ return -ENOENT;
+
+ return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
}
--
2.34.1
On 02/09/2022 16:14, Abel Vesa wrote:
> From: Ola Jeppsson <[email protected]>
>
> It is possible that, between the reference count dropping to zero (on
> the final fastrpc_map_put()) and map->fl->lock being taken in
> fastrpc_free_map(), another thread can call fastrpc_map_lookup() and get
> a reference to a map that is about to be deleted.
>
Yes, there is indeed a race window here.
I am also surprised to see the race on the same map fd.
fastrpc_map_get() is always called under a lock; have you tried adding
locking to fastrpc_map_put()?
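Something along these lines, perhaps (just a rough sketch using
kref_put_lock(); it assumes fastrpc_free_map() is reworked to unlink
the map and then drop fl->lock itself, since the release callback is
invoked with the lock held):

    static void fastrpc_map_put(struct fastrpc_map *map)
    {
            if (!map)
                    return;

            /*
             * The final decrement to zero happens with fl->lock held,
             * so a concurrent fastrpc_map_lookup() done under the same
             * lock can no longer grab a reference to a dying map.
             */
            kref_put_lock(&map->refcount, fastrpc_free_map,
                          &map->fl->lock);
    }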
> Rewrite fastrpc_map_get() to only increase the reference count of a map
> if it's non-zero. Propagate this to callers so they can know if a map is
> about to be deleted.
This will work, but it would be better to close the race window itself.
--srini
>
> Fixes this warning:
> refcount_t: addition on 0; use-after-free.
> WARNING: CPU: 5 PID: 10100 at lib/refcount.c:25 refcount_warn_saturate
> ...
> Call trace:
> refcount_warn_saturate
> [fastrpc_map_get inlined]
> [fastrpc_map_lookup inlined]
> fastrpc_map_create
> fastrpc_internal_invoke
> fastrpc_device_ioctl
> __arm64_sys_ioctl
> invoke_syscall
>
> Fixes: c68cfb718c8f9 ("misc: fastrpc: Add support for context Invoke method")
> Signed-off-by: Ola Jeppsson <[email protected]>
> Signed-off-by: Abel Vesa <[email protected]>
> ---
> drivers/misc/fastrpc.c | 8 +++++---
> 1 file changed, 5 insertions(+), 3 deletions(-)
>
> diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> index 50c17f5da3a8..58654d394d17 100644
> --- a/drivers/misc/fastrpc.c
> +++ b/drivers/misc/fastrpc.c
> @@ -332,10 +332,12 @@ static void fastrpc_map_put(struct fastrpc_map *map)
> kref_put(&map->refcount, fastrpc_free_map);
> }
>
> -static void fastrpc_map_get(struct fastrpc_map *map)
> +static int fastrpc_map_get(struct fastrpc_map *map)
> {
> - if (map)
> - kref_get(&map->refcount);
> + if (!map)
> + return -ENOENT;
> +
> + return kref_get_unless_zero(&map->refcount) ? 0 : -ENOENT;
> }
>
>
On 02/09/2022 16:14, Abel Vesa wrote:
> Currently, there is a race window between the point when the mutex is
> unlocked in fastrpc_map_lookup() and the point when the reference count
> is increased (in fastrpc_map_get()) by fastrpc_map_find(), which can
> also lead to a use-after-free.
>
> So let's merge fastrpc_map_find() into fastrpc_map_lookup(), which
> allows us to protect both the maps list and the reference count with
> the fl->lock spinlock, since the lock is now released only after the
> reference has been taken. Add a take_ref argument so the new function
> suits all callers.
>
> Fixes: 8f6c1d8c4f0c ("misc: fastrpc: Add fdlist implementation")
> Co-developed-by: Ola Jeppsson <[email protected]>
> Signed-off-by: Ola Jeppsson <[email protected]>
> Signed-off-by: Abel Vesa <[email protected]>
> ---
Reviewed-by: Srinivas Kandagatla <[email protected]>
> drivers/misc/fastrpc.c | 41 +++++++++++++++++++++--------------------
> 1 file changed, 21 insertions(+), 20 deletions(-)
>
> diff --git a/drivers/misc/fastrpc.c b/drivers/misc/fastrpc.c
> index 93ebd174d848..0c816a11eeec 100644
> --- a/drivers/misc/fastrpc.c
> +++ b/drivers/misc/fastrpc.c
> @@ -333,30 +333,31 @@ static void fastrpc_map_get(struct fastrpc_map *map)
>
>
> static int fastrpc_map_lookup(struct fastrpc_user *fl, int fd,
> - struct fastrpc_map **ppmap)
> + struct fastrpc_map **ppmap, bool take_ref)
> {
> + struct fastrpc_session_ctx *sess = fl->sctx;
> struct fastrpc_map *map = NULL;
> + int ret = -ENOENT;
>
> - mutex_lock(&fl->mutex);
> + spin_lock(&fl->lock);
> list_for_each_entry(map, &fl->maps, node) {
> - if (map->fd == fd) {
> - *ppmap = map;
> - mutex_unlock(&fl->mutex);
> - return 0;
> - }
> - }
> - mutex_unlock(&fl->mutex);
> -
> - return -ENOENT;
> -}
> + if (map->fd != fd)
> + continue;
>
> -static int fastrpc_map_find(struct fastrpc_user *fl, int fd,
> - struct fastrpc_map **ppmap)
> -{
> - int ret = fastrpc_map_lookup(fl, fd, ppmap);
> + if (take_ref) {
> + ret = fastrpc_map_get(map);
> + if (ret) {
> + dev_dbg(sess->dev, "%s: Failed to get map fd=%d ret=%d\n",
> + __func__, fd, ret);
> + break;
> + }
> + }
>
> - if (!ret)
> - fastrpc_map_get(*ppmap);
> + *ppmap = map;
> + ret = 0;
> + break;
> + }
> + spin_unlock(&fl->lock);
>
> return ret;
> }
> @@ -703,7 +704,7 @@ static int fastrpc_map_create(struct fastrpc_user *fl, int fd,
> struct fastrpc_map *map = NULL;
> int err = 0;
>
> - if (!fastrpc_map_find(fl, fd, ppmap))
> + if (!fastrpc_map_lookup(fl, fd, ppmap, true))
> return 0;
>
> map = kzalloc(sizeof(*map), GFP_KERNEL);
> @@ -1026,7 +1027,7 @@ static int fastrpc_put_args(struct fastrpc_invoke_ctx *ctx,
> for (i = 0; i < FASTRPC_MAX_FDLIST; i++) {
> if (!fdlist[i])
> break;
> - if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap))
> + if (!fastrpc_map_lookup(fl, (int)fdlist[i], &mmap, false))
> fastrpc_map_put(mmap);
> }
>