Rather than have a local coding style, use the typical kernel style.
Joe Perches (13):
nvdimm: Use more typical whitespace
nvdimm: Move logical continuations to previous line
nvdimm: Use octal permissions
nvdimm: Use a more common kernel spacing style
nvdimm: Use "unsigned int" in preference to "unsigned"
nvdimm: Add and remove blank lines
nvdimm: Use typical kernel brace styles
nvdimm: Use typical kernel style indentation
nvdimm: btt.h: Neaten #defines to improve readability
nvdimm: namespace_devs: Move assignment operators
nvdimm: Use more common logic testing styles and bare ; positions
nvdimm: namespace_devs: Change progess typo to progress
nvdimm: Miscellaneous neatening
drivers/nvdimm/badrange.c | 22 +-
drivers/nvdimm/blk.c | 39 ++--
drivers/nvdimm/btt.c | 249 +++++++++++----------
drivers/nvdimm/btt.h | 56 ++---
drivers/nvdimm/btt_devs.c | 68 +++---
drivers/nvdimm/bus.c | 138 ++++++------
drivers/nvdimm/claim.c | 50 ++---
drivers/nvdimm/core.c | 42 ++--
drivers/nvdimm/dax_devs.c | 3 +-
drivers/nvdimm/dimm.c | 3 +-
drivers/nvdimm/dimm_devs.c | 107 ++++-----
drivers/nvdimm/e820.c | 2 +-
drivers/nvdimm/label.c | 213 +++++++++---------
drivers/nvdimm/label.h | 6 +-
drivers/nvdimm/namespace_devs.c | 472 +++++++++++++++++++++-------------------
drivers/nvdimm/nd-core.h | 31 +--
drivers/nvdimm/nd.h | 94 ++++----
drivers/nvdimm/nd_virtio.c | 20 +-
drivers/nvdimm/of_pmem.c | 6 +-
drivers/nvdimm/pfn_devs.c | 136 ++++++------
drivers/nvdimm/pmem.c | 57 ++---
drivers/nvdimm/pmem.h | 2 +-
drivers/nvdimm/region.c | 20 +-
drivers/nvdimm/region_devs.c | 160 +++++++-------
drivers/nvdimm/security.c | 138 ++++++------
drivers/nvdimm/virtio_pmem.c | 10 +-
26 files changed, 1115 insertions(+), 1029 deletions(-)
--
2.15.0
Make the logical continuation style more like the rest of the kernel.
No change in object files.
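For example, in btt.c below:

	if (mutex_is_locked(&arena->err_lock)
			|| arena->freelist[lane].has_err) {

becomes

	if (mutex_is_locked(&arena->err_lock) ||
	    arena->freelist[lane].has_err) {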
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/btt.c | 9 +++++----
drivers/nvdimm/bus.c | 4 ++--
drivers/nvdimm/claim.c | 4 ++--
drivers/nvdimm/dimm_devs.c | 23 ++++++++++++-----------
drivers/nvdimm/label.c | 8 ++++----
drivers/nvdimm/namespace_devs.c | 40 +++++++++++++++++++++-------------------
drivers/nvdimm/pfn_devs.c | 17 +++++++++--------
drivers/nvdimm/pmem.c | 5 +++--
drivers/nvdimm/region.c | 6 +++---
drivers/nvdimm/region_devs.c | 23 ++++++++++++-----------
drivers/nvdimm/security.c | 34 ++++++++++++++++++++--------------
11 files changed, 93 insertions(+), 80 deletions(-)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index d3e187ac43eb..6362d96dfc16 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -603,8 +603,9 @@ static int btt_freelist_init(struct arena_info *arena)
static bool ent_is_padding(struct log_entry *ent)
{
- return (ent->lba == 0) && (ent->old_map == 0) && (ent->new_map == 0)
- && (ent->seq == 0);
+ return (ent->lba == 0) &&
+ (ent->old_map == 0) && (ent->new_map == 0) &&
+ (ent->seq == 0);
}
/*
@@ -1337,8 +1338,8 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
if (btt_is_badblock(btt, arena, arena->freelist[lane].block))
arena->freelist[lane].has_err = 1;
- if (mutex_is_locked(&arena->err_lock)
- || arena->freelist[lane].has_err) {
+ if (mutex_is_locked(&arena->err_lock) ||
+ arena->freelist[lane].has_err) {
nd_region_release_lane(btt->nd_region, lane);
ret = arena_clear_freelist_error(arena, lane);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 83b6fcbb252d..6d4d4c72ac92 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -189,8 +189,8 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
ndr_end = nd_region->ndr_start + nd_region->ndr_size - 1;
/* make sure we are in the region */
- if (ctx->phys < nd_region->ndr_start
- || (ctx->phys + ctx->cleared) > ndr_end)
+ if (ctx->phys < nd_region->ndr_start ||
+ (ctx->phys + ctx->cleared) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 62f3afaa7d27..ff66a3cc349c 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -274,8 +274,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
}
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
- if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
- && !(flags & NVDIMM_IO_ATOMIC)) {
+ if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512) &&
+ !(flags & NVDIMM_IO_ATOMIC)) {
long cleared;
might_sleep();
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 52b00078939b..cb5598b3c389 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -437,10 +437,11 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_security.attr) {
/* Are there any state mutation ops (make writable)? */
- if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
- || nvdimm->sec.ops->change_key
- || nvdimm->sec.ops->erase
- || nvdimm->sec.ops->overwrite)
+ if (nvdimm->sec.ops->freeze ||
+ nvdimm->sec.ops->disable ||
+ nvdimm->sec.ops->change_key ||
+ nvdimm->sec.ops->erase ||
+ nvdimm->sec.ops->overwrite)
return a->mode;
return 0444;
}
@@ -516,8 +517,9 @@ int nvdimm_security_setup_events(struct device *dev)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
- if (!nvdimm->sec.flags || !nvdimm->sec.ops
- || !nvdimm->sec.ops->overwrite)
+ if (!nvdimm->sec.flags ||
+ !nvdimm->sec.ops ||
+ !nvdimm->sec.ops->overwrite)
return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
@@ -589,8 +591,8 @@ int alias_dpa_busy(struct device *dev, void *data)
* (i.e. BLK is allocated after all aliased PMEM).
*/
if (info->res) {
- if (info->res->start >= nd_mapping->start
- && info->res->start < map_end)
+ if (info->res->start >= nd_mapping->start &&
+ info->res->start < map_end)
/* pass */;
else
return 0;
@@ -604,9 +606,8 @@ int alias_dpa_busy(struct device *dev, void *data)
for_each_dpa_resource(ndd, res) {
if (strncmp(res->name, "pmem", 4) != 0)
continue;
- if ((res->start >= blk_start && res->start < map_end)
- || (res->end >= blk_start
- && res->end <= map_end)) {
+ if ((res->start >= blk_start && res->start < map_end) ||
+ (res->end >= blk_start && res->end <= map_end)) {
new = max(blk_start, min(map_end + 1, res->end + 1));
if (new != blk_start) {
blk_start = new;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index ebfad5183b23..37ea4fd89d3f 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -187,8 +187,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
}
size = __le64_to_cpu(nsindex[i]->mysize);
- if (size > sizeof_namespace_index(ndd)
- || size < sizeof(struct nd_namespace_index)) {
+ if (size > sizeof_namespace_index(ndd) ||
+ size < sizeof(struct nd_namespace_index)) {
dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
continue;
}
@@ -839,8 +839,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
if (!label_ent->label)
continue;
- if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
- || memcmp(nspm->uuid, label_ent->label->uuid,
+ if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags) ||
+ memcmp(nspm->uuid, label_ent->label->uuid,
NSLABEL_UUID_LEN) == 0)
reap_victim(nd_mapping, label_ent);
}
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 5ffa137dc963..df2a82179622 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -598,8 +598,8 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
return;
/* allocation needs to be contiguous with the existing namespace */
- if (valid->start == exist->end + 1
- || valid->end == exist->start - 1)
+ if (valid->start == exist->end + 1 ||
+ valid->end == exist->start - 1)
return;
invalid:
@@ -777,9 +777,10 @@ static int merge_dpa(struct nd_region *nd_region,
struct resource *next = res->sibling;
resource_size_t end = res->start + resource_size(res);
- if (!next || strcmp(res->name, label_id->id) != 0
- || strcmp(next->name, label_id->id) != 0
- || end != next->start)
+ if (!next ||
+ strcmp(res->name, label_id->id) != 0 ||
+ strcmp(next->name, label_id->id) != 0 ||
+ end != next->start)
continue;
end += resource_size(next);
nvdimm_free_dpa(ndd, next);
@@ -1459,8 +1460,8 @@ static int btt_claim_class(struct device *dev)
loop_bitmask |= 1;
else {
/* check whether existing labels are v1.1 or v1.2 */
- if (__le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ if (__le16_to_cpu(nsindex->major) == 1 &&
+ __le16_to_cpu(nsindex->minor) == 1)
loop_bitmask |= 2;
else
loop_bitmask |= 4;
@@ -1658,11 +1659,12 @@ static umode_t namespace_visible(struct kobject *kobj,
return a->mode;
}
- if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
- || a == &dev_attr_holder.attr
- || a == &dev_attr_holder_class.attr
- || a == &dev_attr_force_raw.attr
- || a == &dev_attr_mode.attr)
+ if (a == &dev_attr_nstype.attr ||
+ a == &dev_attr_size.attr ||
+ a == &dev_attr_holder.attr ||
+ a == &dev_attr_holder_class.attr ||
+ a == &dev_attr_force_raw.attr ||
+ a == &dev_attr_mode.attr)
return a->mode;
return 0;
@@ -1818,9 +1820,9 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
continue;
- if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
+ if (namespace_label_has(ndd, type_guid) &&
+ !guid_equal(&nd_set->type_guid,
+ &nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
&nd_set->type_guid,
&nd_label->type_guid);
@@ -1882,8 +1884,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
hw_end = hw_start + nd_mapping->size;
pmem_start = __le64_to_cpu(nd_label->dpa);
pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
- if (pmem_start >= hw_start && pmem_start < hw_end
- && pmem_end <= hw_end && pmem_end > hw_start)
+ if (pmem_start >= hw_start && pmem_start < hw_end &&
+ pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
else {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
@@ -2049,8 +2051,8 @@ struct resource *nsblk_add_resource(struct nd_region *nd_region,
return NULL;
nsblk->res = (struct resource **) res;
for_each_dpa_resource(ndd, res)
- if (strcmp(res->name, label_id.id) == 0
- && res->start == start) {
+ if (strcmp(res->name, label_id.id) == 0 &&
+ res->start == start) {
nsblk->res[nsblk->num_resources++] = res;
return res;
}
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 354ec83f0081..5382b4f2f5ef 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -74,14 +74,14 @@ static ssize_t mode_store(struct device *dev,
else {
size_t n = len - 1;
- if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
+ if (strncmp(buf, "pmem\n", n) == 0 ||
+ strncmp(buf, "pmem", n) == 0) {
nd_pfn->mode = PFN_MODE_PMEM;
- } else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
+ } else if (strncmp(buf, "ram\n", n) == 0 ||
+ strncmp(buf, "ram", n) == 0)
nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
+ else if (strncmp(buf, "none\n", n) == 0 ||
+ strncmp(buf, "none", n) == 0)
nd_pfn->mode = PFN_MODE_NONE;
else
rc = -EINVAL;
@@ -529,8 +529,9 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
return -EBUSY;
}
- if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
- || !IS_ALIGNED(offset, PAGE_SIZE)) {
+ if ((align &&
+ !IS_ALIGNED(nsio->res.start + offset + start_pad, align)) ||
+ !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
"bad offset: %#llx dax disabled align: %#lx\n",
offset, align);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 29f19db46845..fa4500b4f2eb 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -506,8 +506,9 @@ static int nd_pmem_probe(struct device *dev)
return pmem_attach_disk(dev, ndns);
/* if we find a valid info-block we'll come back as that personality */
- if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
- || nd_dax_probe(dev, ndns) == 0)
+ if (nd_btt_probe(dev, ndns) == 0 ||
+ nd_pfn_probe(dev, ndns) == 0 ||
+ nd_dax_probe(dev, ndns) == 0)
return -ENXIO;
/* ...otherwise we're just a raw pmem device */
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 8b7dbac27aea..8486b6c26367 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -16,9 +16,9 @@ static int nd_region_probe(struct device *dev)
struct nd_region_data *ndrd;
struct nd_region *nd_region = to_nd_region(dev);
- if (nd_region->num_lanes > num_online_cpus()
- && nd_region->num_lanes < num_possible_cpus()
- && !test_and_set_bit(0, &once)) {
+ if (nd_region->num_lanes > num_online_cpus() &&
+ nd_region->num_lanes < num_possible_cpus() &&
+ !test_and_set_bit(0, &once)) {
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
num_online_cpus(), nd_region->num_lanes,
num_possible_cpus());
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 025cd996ea58..0cff51370d3c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -660,13 +660,13 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
return a->mode;
}
- if (a != &dev_attr_set_cookie.attr
- && a != &dev_attr_available_size.attr)
+ if (a != &dev_attr_set_cookie.attr &&
+ a != &dev_attr_available_size.attr)
return a->mode;
- if ((type == ND_DEVICE_NAMESPACE_PMEM
- || type == ND_DEVICE_NAMESPACE_BLK)
- && a == &dev_attr_available_size.attr)
+ if ((type == ND_DEVICE_NAMESPACE_PMEM ||
+ type == ND_DEVICE_NAMESPACE_BLK) &&
+ a == &dev_attr_available_size.attr)
return a->mode;
else if (is_memory(dev) && nd_set)
return a->mode;
@@ -688,8 +688,9 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
if (!nd_set)
return 0;
- if (nsindex && __le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ if (nsindex &&
+ __le16_to_cpu(nsindex->major) == 1 &&
+ __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
@@ -1002,8 +1003,8 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (test_bit(NDD_UNARMED, &nvdimm->flags))
ro = 1;
- if (test_bit(NDD_NOBLK, &nvdimm->flags)
- && dev_type == &nd_blk_device_type) {
+ if (test_bit(NDD_NOBLK, &nvdimm->flags) &&
+ dev_type == &nd_blk_device_type) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
caller, dev_name(&nvdimm->dev), i);
return NULL;
@@ -1186,8 +1187,8 @@ int nvdimm_has_flush(struct nd_region *nd_region)
int i;
/* no nvdimm or pmem api == flushing capability unknown */
- if (nd_region->ndr_mappings == 0
- || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+ if (nd_region->ndr_mappings == 0 ||
+ !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) {
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index be89ad78a368..8f1766c00c5f 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -173,8 +173,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->unlock ||
+ !nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
@@ -246,8 +247,9 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->disable ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -281,8 +283,9 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->change_key ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -330,16 +333,17 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->erase ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
if (rc)
return rc;
- if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
- && pass_type == NVDIMM_MASTER) {
+ if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags) &&
+ pass_type == NVDIMM_MASTER) {
dev_dbg(dev,
"Attempt to secure erase in wrong master state.\n");
return -EOPNOTSUPP;
@@ -371,8 +375,9 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
/* The bus lock should be held at the top level of the call stack */
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->overwrite ||
+ !nvdimm->sec.flags)
return -EOPNOTSUPP;
if (dev->driver == NULL) {
@@ -427,8 +432,9 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
tmo = nvdimm->sec.overwrite_tmo;
- if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
- || !nvdimm->sec.flags)
+ if (!nvdimm->sec.ops ||
+ !nvdimm->sec.ops->query_overwrite ||
+ !nvdimm->sec.flags)
return;
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
--
2.15.0
Avoid the S_IRUGO define and use octal 0444 instead to improve
readability and match the more common kernel style.
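e.g.:

	debugfs_create_x64("size", S_IRUGO, d, &a->size);

becomes

	debugfs_create_x64("size", 0444, d, &a->size);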
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/btt.c | 39 ++++++++++++++++++---------------------
1 file changed, 18 insertions(+), 21 deletions(-)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 6362d96dfc16..9cad4dca6eac 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -229,27 +229,24 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
return;
a->debugfs_dir = d;
- debugfs_create_x64("size", S_IRUGO, d, &a->size);
- debugfs_create_x64("external_lba_start", S_IRUGO, d,
- &a->external_lba_start);
- debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
- debugfs_create_u32("internal_lbasize", S_IRUGO, d,
- &a->internal_lbasize);
- debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
- debugfs_create_u32("external_lbasize", S_IRUGO, d,
- &a->external_lbasize);
- debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
- debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
- debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
- debugfs_create_x64("nextoff", S_IRUGO, d, &a->nextoff);
- debugfs_create_x64("infooff", S_IRUGO, d, &a->infooff);
- debugfs_create_x64("dataoff", S_IRUGO, d, &a->dataoff);
- debugfs_create_x64("mapoff", S_IRUGO, d, &a->mapoff);
- debugfs_create_x64("logoff", S_IRUGO, d, &a->logoff);
- debugfs_create_x64("info2off", S_IRUGO, d, &a->info2off);
- debugfs_create_x32("flags", S_IRUGO, d, &a->flags);
- debugfs_create_u32("log_index_0", S_IRUGO, d, &a->log_index[0]);
- debugfs_create_u32("log_index_1", S_IRUGO, d, &a->log_index[1]);
+ debugfs_create_x64("size", 0444, d, &a->size);
+ debugfs_create_x64("external_lba_start", 0444, d, &a->external_lba_start);
+ debugfs_create_x32("internal_nlba", 0444, d, &a->internal_nlba);
+ debugfs_create_u32("internal_lbasize", 0444, d, &a->internal_lbasize);
+ debugfs_create_x32("external_nlba", 0444, d, &a->external_nlba);
+ debugfs_create_u32("external_lbasize", 0444, d, &a->external_lbasize);
+ debugfs_create_u32("nfree", 0444, d, &a->nfree);
+ debugfs_create_u16("version_major", 0444, d, &a->version_major);
+ debugfs_create_u16("version_minor", 0444, d, &a->version_minor);
+ debugfs_create_x64("nextoff", 0444, d, &a->nextoff);
+ debugfs_create_x64("infooff", 0444, d, &a->infooff);
+ debugfs_create_x64("dataoff", 0444, d, &a->dataoff);
+ debugfs_create_x64("mapoff", 0444, d, &a->mapoff);
+ debugfs_create_x64("logoff", 0444, d, &a->logoff);
+ debugfs_create_x64("info2off", 0444, d, &a->info2off);
+ debugfs_create_x32("flags", 0444, d, &a->flags);
+ debugfs_create_u32("log_index_0", 0444, d, &a->log_index[0]);
+ debugfs_create_u32("log_index_1", 0444, d, &a->log_index[1]);
}
static void btt_debugfs_init(struct btt *btt)
--
2.15.0
Use the more common kernel spacing styles.
git diff -w shows no difference.
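e.g. no space after a cast and spaces around binary operators:

	(u64) s * 512

becomes

	(u64)s * 512

and

	char cmd[SEC_CMD_SIZE+1]

becomes

	char cmd[SEC_CMD_SIZE + 1]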
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/badrange.c | 4 ++--
drivers/nvdimm/blk.c | 2 +-
drivers/nvdimm/btt.c | 4 ++--
drivers/nvdimm/btt_devs.c | 2 +-
drivers/nvdimm/bus.c | 14 +++++++-------
drivers/nvdimm/core.c | 2 +-
drivers/nvdimm/label.c | 28 ++++++++++++++--------------
drivers/nvdimm/namespace_devs.c | 22 +++++++++++-----------
drivers/nvdimm/nd-core.h | 2 +-
drivers/nvdimm/nd.h | 4 ++--
drivers/nvdimm/pfn_devs.c | 6 +++---
drivers/nvdimm/pmem.c | 2 +-
drivers/nvdimm/region.c | 2 +-
drivers/nvdimm/region_devs.c | 2 +-
drivers/nvdimm/security.c | 18 +++++++++---------
15 files changed, 57 insertions(+), 57 deletions(-)
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index b997c2007b83..f2a742c6258a 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -165,11 +165,11 @@ EXPORT_SYMBOL_GPL(badrange_forget);
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
- (u64) s * 512, (u64) num * 512);
+ (u64)s * 512, (u64)num * 512);
/* this isn't an error as the hardware will still throw an exception */
if (badblocks_set(bb, s, num, 1))
dev_info_once(bb->dev, "%s: failed for sector %llx\n",
- __func__, (u64) s);
+ __func__, (u64)s);
}
/**
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index edd3e1664edc..95acb48bfaed 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -189,7 +189,7 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
dev_dbg(&nsblk->common.dev,
"io error in %s sector %lld, len %d,\n",
(rw == READ) ? "READ" : "WRITE",
- (unsigned long long) iter.bi_sector, len);
+ (unsigned long long)iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 9cad4dca6eac..28b65413abd8 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1007,7 +1007,7 @@ static int btt_arena_write_layout(struct arena_info *arena)
super->info2off = cpu_to_le64(arena->info2off - arena->infooff);
super->flags = 0;
- sum = nd_sb_checksum((struct nd_gen_sb *) super);
+ sum = nd_sb_checksum((struct nd_gen_sb *)super);
super->checksum = cpu_to_le64(sum);
ret = btt_info_write(arena, super);
@@ -1469,7 +1469,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
"io error in %s sector %lld, len %d,\n",
(op_is_write(bio_op(bio))) ? "WRITE" :
"READ",
- (unsigned long long) iter.bi_sector, len);
+ (unsigned long long)iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 9c4cbda834be..f6429842f1b6 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -256,7 +256,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
checksum = le64_to_cpu(super->checksum);
super->checksum = 0;
- if (checksum != nd_sb_checksum((struct nd_gen_sb *) super))
+ if (checksum != nd_sb_checksum((struct nd_gen_sb *)super))
return false;
super->checksum = cpu_to_le64(checksum);
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 6d4d4c72ac92..35591f492d27 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -876,7 +876,7 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return remainder;
return out_field[1] - 8;
} else if (cmd == ND_CMD_CALL) {
- struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *) in_field;
+ struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *)in_field;
return pkg->nd_size_out;
}
@@ -984,7 +984,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
const struct nd_cmd_desc *desc = NULL;
unsigned int cmd = _IOC_NR(ioctl_cmd);
struct device *dev = &nvdimm_bus->dev;
- void __user *p = (void __user *) arg;
+ void __user *p = (void __user *)arg;
char *out_env = NULL, *in_env = NULL;
const char *cmd_name, *dimm_name;
u32 in_len = 0, out_len = 0;
@@ -1073,7 +1073,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
- (u32 *) in_env, (u32 *) out_env, 0);
+ (u32 *)in_env, (u32 *)out_env, 0);
u32 copy;
if (out_size == UINT_MAX) {
@@ -1094,7 +1094,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
out_len += out_size;
}
- buf_len = (u64) out_len + (u64) in_len;
+ buf_len = (u64)out_len + (u64)in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
@@ -1150,7 +1150,7 @@ enum nd_ioctl_mode {
static int match_dimm(struct device *dev, void *data)
{
- long id = (long) data;
+ long id = (long)data;
if (is_nvdimm(dev)) {
struct nvdimm *nvdimm = to_nvdimm(dev);
@@ -1166,7 +1166,7 @@ static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
{
struct nvdimm_bus *nvdimm_bus, *found = NULL;
- long id = (long) file->private_data;
+ long id = (long)file->private_data;
struct nvdimm *nvdimm = NULL;
int rc, ro;
@@ -1221,7 +1221,7 @@ static int nd_open(struct inode *inode, struct file *file)
{
long minor = iminor(inode);
- file->private_data = (void *) minor;
+ file->private_data = (void *)minor;
return 0;
}
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index b3ff3e62d847..e30b39f49c46 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -184,7 +184,7 @@ u64 nd_fletcher64(void *addr, size_t len, bool le)
int i;
for (i = 0; i < len / sizeof(u32); i++) {
- lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
+ lo32 += le ? le32_to_cpu((__le32)buf[i]) : buf[i];
hi32 += lo32;
}
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 37ea4fd89d3f..2c780c5352dc 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -275,8 +275,8 @@ static int to_slot(struct nvdimm_drvdata *ndd,
{
unsigned long label, base;
- label = (unsigned long) nd_label;
- base = (unsigned long) nd_label_base(ndd);
+ label = (unsigned long)nd_label;
+ base = (unsigned long)nd_label_base(ndd);
return (label - base) / sizeof_namespace_label(ndd);
}
@@ -285,10 +285,10 @@ static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
unsigned long label, base;
- base = (unsigned long) nd_label_base(ndd);
+ base = (unsigned long)nd_label_base(ndd);
label = base + sizeof_namespace_label(ndd) * slot;
- return (struct nd_namespace_label *) label;
+ return (struct nd_namespace_label *)label;
}
#define for_each_clear_bit_le(bit, addr, size) \
@@ -314,7 +314,7 @@ static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
if (nsindex == NULL)
return false;
- *free = (unsigned long *) nsindex->free;
+ *free = (unsigned long *)nsindex->free;
*nslot = __le32_to_cpu(nsindex->nslot);
*nsindex_out = nsindex;
@@ -659,16 +659,16 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
memset(&nsindex->flags, 0, 3);
nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
nsindex->seq = __cpu_to_le32(seq);
- offset = (unsigned long) nsindex
- - (unsigned long) to_namespace_index(ndd, 0);
+ offset = (unsigned long)nsindex
+ - (unsigned long)to_namespace_index(ndd, 0);
nsindex->myoff = __cpu_to_le64(offset);
nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
- offset = (unsigned long) to_namespace_index(ndd,
+ offset = (unsigned long)to_namespace_index(ndd,
nd_label_next_nsindex(index))
- - (unsigned long) to_namespace_index(ndd, 0);
+ - (unsigned long)to_namespace_index(ndd, 0);
nsindex->otheroff = __cpu_to_le64(offset);
- offset = (unsigned long) nd_label_base(ndd)
- - (unsigned long) to_namespace_index(ndd, 0);
+ offset = (unsigned long)nd_label_base(ndd)
+ - (unsigned long)to_namespace_index(ndd, 0);
nsindex->labeloff = __cpu_to_le64(offset);
nsindex->nslot = __cpu_to_le32(nslot);
nsindex->major = __cpu_to_le16(1);
@@ -678,7 +678,7 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
nsindex->minor = __cpu_to_le16(2);
nsindex->checksum = __cpu_to_le64(0);
if (flags & ND_NSINDEX_INIT) {
- unsigned long *free = (unsigned long *) nsindex->free;
+ unsigned long *free = (unsigned long *)nsindex->free;
u32 nfree = ALIGN(nslot, BITS_PER_LONG);
int last_bits, i;
@@ -709,8 +709,8 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
struct nd_namespace_label *nd_label)
{
- return (unsigned long) nd_label
- - (unsigned long) to_namespace_index(ndd, 0);
+ return (unsigned long)nd_label
+ - (unsigned long)to_namespace_index(ndd, 0);
}
enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index df2a82179622..2bf4b6344926 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -824,8 +824,8 @@ int __reserve_free_pmem(struct device *dev, void *data)
rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
dev_WARN_ONCE(&nd_region->dev, rem,
"pmem reserve underrun: %#llx of %#llx bytes\n",
- (unsigned long long) n - rem,
- (unsigned long long) n);
+ (unsigned long long)n - rem,
+ (unsigned long long)n);
return rem ? -ENXIO : 0;
}
@@ -905,8 +905,8 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
dev_WARN_ONCE(&nd_region->dev, rem,
"allocation underrun: %#llx of %#llx bytes\n",
- (unsigned long long) n - rem,
- (unsigned long long) n);
+ (unsigned long long)n - rem,
+ (unsigned long long)n);
if (rem)
return -ENXIO;
@@ -1245,7 +1245,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
- sprintf((void *) res->name, "%s",
+ sprintf((void *)res->name, "%s",
new_label_id.id);
mutex_lock(&nd_mapping->lock);
@@ -1328,7 +1328,7 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the namespace has no allocation */
if (resource_size(res) == 0)
return -ENXIO;
- return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
+ return sprintf(buf, "%#llx\n", (unsigned long long)res->start);
}
static DEVICE_ATTR_RO(resource);
@@ -2000,9 +2000,9 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
if (__le16_to_cpu(label0->position) != 0)
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
- nspm->alt_name = kmemdup((void __force *) label0->name,
+ nspm->alt_name = kmemdup((void __force *)label0->name,
NSLABEL_NAME_LEN, GFP_KERNEL);
- nspm->uuid = kmemdup((void __force *) label0->uuid,
+ nspm->uuid = kmemdup((void __force *)label0->uuid,
NSLABEL_UUID_LEN, GFP_KERNEL);
nspm->lbasize = __le64_to_cpu(label0->lbasize);
ndd = to_ndd(nd_mapping);
@@ -2049,7 +2049,7 @@ struct resource *nsblk_add_resource(struct nd_region *nd_region,
GFP_KERNEL);
if (!res)
return NULL;
- nsblk->res = (struct resource **) res;
+ nsblk->res = (struct resource **)res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0 &&
res->start == start) {
@@ -2277,8 +2277,8 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
static int cmp_dpa(const void *a, const void *b)
{
- const struct device *dev_a = *(const struct device **) a;
- const struct device *dev_b = *(const struct device **) b;
+ const struct device *dev_a = *(const struct device **)a;
+ const struct device *dev_b = *(const struct device **)b;
struct nd_namespace_blk *nsblk_a, *nsblk_b;
struct nd_namespace_pmem *nspm_a, *nspm_b;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 60525ff1f19f..b9163fff27b0 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -63,7 +63,7 @@ static inline unsigned long nvdimm_security_flags(
/* disabled, locked, unlocked, and overwrite are mutually exclusive */
dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
"reported invalid security state: %#llx\n",
- (unsigned long long) flags);
+ (unsigned long long)flags);
return flags;
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 56ffd998d642..c10a4b94d44a 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -90,8 +90,8 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
- (unsigned long long) (res ? resource_size(res) : 0), \
- (unsigned long long) (res ? res->start : 0), ##arg)
+ (unsigned long long)(res ? resource_size(res) : 0), \
+ (unsigned long long)(res ? res->start : 0), ##arg)
#define for_each_dpa_resource(ndd, res) \
for (res = (ndd)->dpa.child; res; res = res->sibling)
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 5382b4f2f5ef..20a0cce9ee93 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -216,7 +216,7 @@ static ssize_t resource_show(struct device *dev,
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
+ rc = sprintf(buf, "%#llx\n", (unsigned long long)nsio->res.start
+ start_pad + offset);
} else {
/* no address to convey if the pfn instance is disabled */
@@ -445,7 +445,7 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
checksum = le64_to_cpu(pfn_sb->checksum);
pfn_sb->checksum = 0;
- if (checksum != nd_sb_checksum((struct nd_gen_sb *) pfn_sb))
+ if (checksum != nd_sb_checksum((struct nd_gen_sb *)pfn_sb))
return -ENODEV;
pfn_sb->checksum = cpu_to_le64(checksum);
@@ -728,7 +728,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
pfn_sb->version_minor = cpu_to_le16(3);
pfn_sb->end_trunc = cpu_to_le32(end_trunc);
pfn_sb->align = cpu_to_le32(nd_pfn->align);
- checksum = nd_sb_checksum((struct nd_gen_sb *) pfn_sb);
+ checksum = nd_sb_checksum((struct nd_gen_sb *)pfn_sb);
pfn_sb->checksum = cpu_to_le64(checksum);
return nvdimm_write_bytes(ndns, SZ_4K, pfn_sb, sizeof(*pfn_sb), 0);
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index fa4500b4f2eb..dfe38d6b6607 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -85,7 +85,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
cleared /= 512;
dev_dbg(dev, "%#llx clear %ld sector%s\n",
- (unsigned long long) sector, cleared,
+ (unsigned long long)sector, cleared,
cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared);
if (pmem->bb_state)
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 8486b6c26367..fdd67ff499c9 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -113,7 +113,7 @@ static int nd_region_remove(struct device *dev)
static int child_notify(struct device *dev, void *data)
{
- nd_device_notify(dev, *(enum nvdimm_event *) data);
+ nd_device_notify(dev, *(enum nvdimm_event *)data);
return 0;
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 0cff51370d3c..9265a2b0018c 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -44,7 +44,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
}
if (j < i)
- flush_page = (void __iomem *) ((unsigned long)
+ flush_page = (void __iomem *)((unsigned long)
ndrd_get_flush_wpq(ndrd, dimm, j)
& PAGE_MASK);
else
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 8f1766c00c5f..ac23cd4480bd 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -473,13 +473,13 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
}
#define OPS \
- C( OP_FREEZE, "freeze", 1), \
- C( OP_DISABLE, "disable", 2), \
- C( OP_UPDATE, "update", 3), \
- C( OP_ERASE, "erase", 2), \
- C( OP_OVERWRITE, "overwrite", 2), \
- C( OP_MASTER_UPDATE, "master_update", 3), \
- C( OP_MASTER_ERASE, "master_erase", 2)
+ C(OP_FREEZE, "freeze", 1), \
+ C(OP_DISABLE, "disable", 2), \
+ C(OP_UPDATE, "update", 3), \
+ C(OP_ERASE, "erase", 2), \
+ C(OP_OVERWRITE, "overwrite", 2), \
+ C(OP_MASTER_UPDATE, "master_update", 3), \
+ C(OP_MASTER_ERASE, "master_erase", 2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
@@ -498,8 +498,8 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
ssize_t rc;
- char cmd[SEC_CMD_SIZE+1], keystr[KEY_ID_SIZE+1],
- nkeystr[KEY_ID_SIZE+1];
+ char cmd[SEC_CMD_SIZE + 1], keystr[KEY_ID_SIZE + 1],
+ nkeystr[KEY_ID_SIZE + 1];
unsigned int key, newkey;
int i;
--
2.15.0
Use a more common kernel style.
Remove unnecessary multiple blank lines.
Remove blank lines before and after braces.
Add blank lines after function definitions and enums.
Add blank lines around #define pr_fmt.
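e.g. in bus.c:

	 */
	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
	#include <linux/libnvdimm.h>

becomes

	 */

	#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

	#include <linux/libnvdimm.h>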
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/btt.c | 2 --
drivers/nvdimm/bus.c | 5 ++---
drivers/nvdimm/dimm.c | 1 -
drivers/nvdimm/dimm_devs.c | 2 ++
drivers/nvdimm/label.c | 1 -
drivers/nvdimm/namespace_devs.c | 5 -----
drivers/nvdimm/nd-core.h | 4 ++++
drivers/nvdimm/nd.h | 6 ++++++
drivers/nvdimm/nd_virtio.c | 1 -
drivers/nvdimm/region_devs.c | 1 +
drivers/nvdimm/security.c | 2 --
11 files changed, 15 insertions(+), 15 deletions(-)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 28b65413abd8..0927cbdc5cc6 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1354,7 +1354,6 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
while (arena->rtt[i] == (RTT_VALID | new_postmap))
cpu_relax();
-
if (new_postmap >= arena->internal_nlba) {
ret = -EIO;
goto out_lane;
@@ -1496,7 +1495,6 @@ static int btt_rw_page(struct block_device *bdev, sector_t sector,
return rc;
}
-
static int btt_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
/* some standard values */
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 35591f492d27..5ffd61c9c4b7 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -2,7 +2,9 @@
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/libnvdimm.h>
#include <linux/sched/mm.h>
#include <linux/vmalloc.h>
@@ -643,7 +645,6 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
set_disk_ro(disk, 1);
return 0;
-
}
EXPORT_SYMBOL(nvdimm_revalidate_disk);
@@ -881,7 +882,6 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
return pkg->nd_size_out;
}
-
return UINT_MAX;
}
EXPORT_SYMBOL_GPL(nd_cmd_out_size);
@@ -940,7 +940,6 @@ static int nd_pmem_forget_poison_check(struct device *dev, void *data)
return -EBUSY;
return 0;
-
}
static int nd_ns_forget_poison_check(struct device *dev, void *data)
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 916710ae647f..5783c6d6dbdc 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -62,7 +62,6 @@ static int nvdimm_probe(struct device *dev)
if (rc < 0)
dev_dbg(dev, "failed to unlock dimm: %d\n", rc);
-
/*
* EACCES failures reading the namespace label-area-properties
* are interpreted as the DIMM capacity being locked but the
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index cb5598b3c389..873df96795b0 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -2,7 +2,9 @@
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
+
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 5700d9b35b8f..bf58357927c4 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -972,7 +972,6 @@ static int __blk_label_update(struct nd_region *nd_region,
}
/* from here on we need to abort on error */
-
/* assign all resources to the namespace before writing the labels */
nsblk->res = NULL;
nsblk->num_resources = 0;
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 2bf4b6344926..600df84b4d2d 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -367,7 +367,6 @@ resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
}
EXPORT_SYMBOL(nd_namespace_blk_validate);
-
static int nd_namespace_label_update(struct nd_region *nd_region,
struct device *dev)
{
@@ -543,7 +542,6 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
return rc ? n : 0;
}
-
/**
* space_valid() - validate free dpa space against constraints
* @nd_region: hosting region of the free space
@@ -2009,7 +2007,6 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
if (namespace_label_has(ndd, abstraction_guid))
nspm->nsio.common.claim_class
= to_nvdimm_cclass(&label0->abstraction_guid);
-
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -2217,7 +2214,6 @@ static int add_namespace_resource(struct nd_region *nd_region,
static struct device *create_namespace_blk(struct nd_region *nd_region,
struct nd_namespace_label *nd_label, int count)
{
-
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nd_interleave_set *nd_set = nd_region->nd_set;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -2362,7 +2358,6 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
} else
devs[count++] = dev;
-
}
dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index b9163fff27b0..3b48fba4629b 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -66,6 +66,7 @@ static inline unsigned long nvdimm_security_flags(
(unsigned long long)flags);
return flags;
}
+
int nvdimm_security_freeze(struct nvdimm *nvdimm);
#if IS_ENABLED(CONFIG_NVDIMM_KEYS)
ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
@@ -76,6 +77,7 @@ static inline ssize_t nvdimm_security_store(struct device *dev,
{
return -EOPNOTSUPP;
}
+
static inline void nvdimm_security_overwrite_query(struct work_struct *work)
{
}
@@ -106,10 +108,12 @@ static inline bool is_nd_region(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_blk(dev) || is_nd_volatile(dev);
}
+
static inline bool is_memory(struct device *dev)
{
return is_nd_pmem(dev) || is_nd_volatile(dev);
}
+
struct nvdimm_bus *walk_to_nvdimm_bus(struct device *nd_dev);
int __init nvdimm_bus_init(void);
void nvdimm_bus_exit(void);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1636061b1f93..d434041ca2e5 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -108,6 +108,7 @@ struct nd_percpu_lane {
enum nd_label_flags {
ND_LABEL_REAP,
};
+
struct nd_label_ent {
struct list_head list;
unsigned long flags;
@@ -384,11 +385,13 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
{
return -ENXIO;
}
+
static inline int devm_nsio_enable(struct device *dev,
struct nd_namespace_io *nsio)
{
return -ENXIO;
}
+
static inline void devm_nsio_disable(struct device *dev,
struct nd_namespace_io *nsio)
{
@@ -409,12 +412,14 @@ static inline bool nd_iostat_start(struct bio *bio, unsigned long *start)
&disk->part0);
return true;
}
+
static inline void nd_iostat_end(struct bio *bio, unsigned long start)
{
struct gendisk *disk = bio->bi_disk;
generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
+
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
{
@@ -428,6 +433,7 @@ static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
return false;
}
+
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
const u8 *nd_dev_to_uuid(struct device *dev);
bool pmem_should_map_pages(struct device *dev);
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index e4f553633759..f09541bf3d5d 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -68,7 +68,6 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
*/
while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
GFP_ATOMIC)) == -ENOSPC) {
-
dev_info(&vdev->dev, "failed to send command to virtio pmem device, no free slots in the virtqueue\n");
req_data->wq_buf_avail = false;
list_add_tail(&req_data->list, &vpmem->req_list);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 9265a2b0018c..76b08b64b0b1 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1141,6 +1141,7 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
return rc;
}
+
/**
* nvdimm_flush - flush any posted write queues between the cpu and pmem media
* @nd_region: blk or interleaved pmem region
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index ac23cd4480bd..13bc5d54f0b6 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -135,7 +135,6 @@ static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
return key_data(*key);
}
-
static int nvdimm_key_revalidate(struct nvdimm *nvdimm)
{
struct key *key;
@@ -439,7 +438,6 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
if (rc == -EBUSY) {
-
/* setup delayed work again */
tmo += 10;
queue_delayed_work(system_wq, &nvdimm->dwork, tmo * HZ);
--
2.15.0
Make the nvdimm code more like the rest of the kernel code to
improve readability.
Add balanced braces to multiple test blocks.
Remove else statements from blocks where the block above uses return.
e.g.:
if (foo) {
[code...];
return FOO;
} else if (bar) {
[code...];
return BAR;
} else
return BAZ;
becomes
if (foo) {
[code...];
return FOO;
}
if (bar) {
[code...];
return BAR;
}
return BAZ;
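and for the balanced braces, e.g. in badrange.c:

	} else
		set_badblock(bb, start_sector, num_sectors);

becomes

	} else {
		set_badblock(bb, start_sector, num_sectors);
	}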
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/badrange.c | 3 +-
drivers/nvdimm/blk.c | 9 +++--
drivers/nvdimm/btt.c | 5 +--
drivers/nvdimm/btt_devs.c | 4 +--
drivers/nvdimm/bus.c | 10 +++---
drivers/nvdimm/claim.c | 7 ++--
drivers/nvdimm/dax_devs.c | 3 +-
drivers/nvdimm/dimm_devs.c | 13 ++++---
drivers/nvdimm/label.c | 13 +++----
drivers/nvdimm/namespace_devs.c | 78 ++++++++++++++++++++++++++---------------
drivers/nvdimm/pfn_devs.c | 24 +++++++------
drivers/nvdimm/pmem.c | 8 ++---
drivers/nvdimm/region_devs.c | 10 +++---
drivers/nvdimm/security.c | 8 +++--
14 files changed, 118 insertions(+), 77 deletions(-)
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index f2a742c6258a..681d99c59f52 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -206,8 +206,9 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
remaining -= done;
s += done;
}
- } else
+ } else {
set_badblock(bb, start_sector, num_sectors);
+ }
}
static void badblocks_populate(struct badrange *badrange,
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 95acb48bfaed..db3973c7f506 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -301,13 +301,16 @@ static int nd_blk_probe(struct device *dev)
dev_set_drvdata(dev, nsblk);
ndns->rw_bytes = nsblk_rw_bytes;
+
if (is_nd_btt(dev))
return nvdimm_namespace_attach_btt(ndns);
- else if (nd_btt_probe(dev, ndns) == 0) {
+
+ if (nd_btt_probe(dev, ndns) == 0) {
/* we'll come back as btt-blk */
return -ENXIO;
- } else
- return nsblk_attach_disk(nsblk);
+ }
+
+ return nsblk_attach_disk(nsblk);
}
static int nd_blk_remove(struct device *dev)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0927cbdc5cc6..39851edc2cc5 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -702,9 +702,10 @@ static int log_set_indices(struct arena_info *arena)
* Only allow the known permutations of log/padding indices,
* i.e. (0, 1), and (0, 2)
*/
- if ((log_index[0] == 0) && ((log_index[1] == 1) || (log_index[1] == 2)))
+ if ((log_index[0] == 0) &&
+ ((log_index[1] == 1) || (log_index[1] == 2))) {
; /* known index possibilities */
- else {
+ } else {
dev_err(to_dev(arena), "Found an unknown padding scheme\n");
return -ENXIO;
}
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index f6429842f1b6..9e0f17045e69 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -139,9 +139,9 @@ static ssize_t size_show(struct device *dev,
ssize_t rc;
nd_device_lock(dev);
- if (dev->driver)
+ if (dev->driver) {
rc = sprintf(buf, "%llu\n", nd_btt->size);
- else {
+ } else {
/* no size to convey if the btt instance is disabled */
rc = -ENXIO;
}
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 5ffd61c9c4b7..620f07ac306c 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -857,9 +857,9 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
if (nvdimm && cmd == ND_CMD_GET_CONFIG_DATA && idx == 1)
return in_field[1];
- else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
+ if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
- else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
+ if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2) {
/*
* Per table 9-276 ARS Data in ACPI 6.1, out_field[1] is
* "Size of Output Buffer in bytes, including this
@@ -876,7 +876,8 @@ u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
if (out_field[1] - 4 == remainder)
return remainder;
return out_field[1] - 8;
- } else if (cmd == ND_CMD_CALL) {
+ }
+ if (cmd == ND_CMD_CALL) {
struct nd_cmd_pkg *pkg = (struct nd_cmd_pkg *)in_field;
return pkg->nd_size_out;
@@ -929,8 +930,9 @@ static int nd_pmem_forget_poison_check(struct device *dev, void *data)
if (!ndns)
return 0;
- } else
+ } else {
ndns = to_ndns(dev);
+ }
nsio = to_nd_namespace_io(&ndns->dev);
pstart = nsio->res.start + offset;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index ff66a3cc349c..3732925aadb8 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -149,9 +149,9 @@ ssize_t nd_namespace_store(struct device *dev,
return -ENOMEM;
strim(name);
- if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)
+ if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0) {
/* pass */;
- else {
+ } else {
len = -EINVAL;
goto out;
}
@@ -288,8 +288,9 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
badblocks_clear(&nsio->bb, sector, cleared);
}
arch_invalidate_pmem(nsio->addr + offset, size);
- } else
+ } else {
rc = -EIO;
+ }
}
memcpy_flushcache(nsio->addr + offset, buf, size);
diff --git a/drivers/nvdimm/dax_devs.c b/drivers/nvdimm/dax_devs.c
index 6d22b0f83b3b..46230eb35b90 100644
--- a/drivers/nvdimm/dax_devs.c
+++ b/drivers/nvdimm/dax_devs.c
@@ -125,8 +125,9 @@ int nd_dax_probe(struct device *dev, struct nd_namespace_common *ndns)
if (rc < 0) {
nd_detach_ndns(dax_dev, &nd_pfn->ndns);
put_device(dax_dev);
- } else
+ } else {
__nd_device_register(dax_dev);
+ }
return rc;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 873df96795b0..4df85dd72682 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -361,8 +361,9 @@ static ssize_t available_slots_show(struct device *dev,
if (nfree - 1 > nfree) {
dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
nfree = 0;
- } else
+ } else {
nfree--;
+ }
rc = sprintf(buf, "%d\n", nfree);
nvdimm_bus_unlock(dev);
return rc;
@@ -728,14 +729,15 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
blk_start = max(map_start, map_end + 1 - *overlap);
for_each_dpa_resource(ndd, res) {
if (res->start >= map_start && res->start < map_end) {
- if (strncmp(res->name, "blk", 3) == 0)
+ if (strncmp(res->name, "blk", 3) == 0) {
blk_start = min(blk_start,
max(map_start, res->start));
- else if (res->end > map_end) {
+ } else if (res->end > map_end) {
reason = "misaligned to iset";
goto err;
- } else
+ } else {
busy += resource_size(res);
+ }
} else if (res->end >= map_start && res->end <= map_end) {
if (strncmp(res->name, "blk", 3) == 0) {
/*
@@ -744,8 +746,9 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
* be used for BLK.
*/
blk_start = map_start;
- } else
+ } else {
busy += resource_size(res);
+ }
} else if (map_start > res->start && map_start < res->end) {
/* total eclipse of the mapping */
busy += nd_mapping->size;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index bf58357927c4..e4632dbebead 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -734,20 +734,21 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
{
if (claim_class == NVDIMM_CCLASS_BTT)
return &nvdimm_btt_guid;
- else if (claim_class == NVDIMM_CCLASS_BTT2)
+ if (claim_class == NVDIMM_CCLASS_BTT2)
return &nvdimm_btt2_guid;
- else if (claim_class == NVDIMM_CCLASS_PFN)
+ if (claim_class == NVDIMM_CCLASS_PFN)
return &nvdimm_pfn_guid;
- else if (claim_class == NVDIMM_CCLASS_DAX)
+ if (claim_class == NVDIMM_CCLASS_DAX)
return &nvdimm_dax_guid;
- else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
+ if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
/*
* If we're modifying a namespace for which we don't
* know the claim_class, don't touch the existing guid.
*/
return target;
- } else
- return &guid_null;
+ }
+
+ return &guid_null;
}
static void reap_victim(struct nd_mapping *nd_mapping,
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 600df84b4d2d..70e1d752c12c 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -228,12 +228,14 @@ const u8 *nd_dev_to_uuid(struct device *dev)
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
- } else if (is_namespace_blk(dev)) {
+ }
+ if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
return nsblk->uuid;
- } else
- return null_uuid;
+ }
+
+ return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);
@@ -260,8 +262,9 @@ static ssize_t __alt_name_store(struct device *dev, const char *buf,
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
ns_altname = &nsblk->alt_name;
- } else
+ } else {
return -ENXIO;
+ }
if (dev->driver || to_ndns(dev)->claim)
return -EBUSY;
@@ -389,7 +392,8 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
return 0;
return nd_pmem_namespace_label_update(nd_region, nspm, size);
- } else if (is_namespace_blk(dev)) {
+ }
+ if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
resource_size_t size = nd_namespace_blk_size(nsblk);
@@ -399,8 +403,9 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
return 0;
return nd_blk_namespace_label_update(nd_region, nsblk, size);
- } else
- return -ENXIO;
+ }
+
+ return -ENXIO;
}
static ssize_t alt_name_store(struct device *dev,
@@ -435,8 +440,9 @@ static ssize_t alt_name_show(struct device *dev,
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
ns_altname = nsblk->alt_name;
- } else
+ } else {
return -ENXIO;
+ }
return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
@@ -685,8 +691,9 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
rc = adjust_resource(res, res->start - allocate,
resource_size(res) + allocate);
action = "cur grow up";
- } else
+ } else {
action = "allocate";
+ }
break;
case ALLOC_MID:
if (strcmp(next->name, label_id->id) == 0) {
@@ -698,8 +705,9 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
action = "next grow up";
} else if (strcmp(res->name, label_id->id) == 0) {
action = "grow down";
- } else
+ } else {
action = "allocate";
+ }
break;
case ALLOC_AFTER:
if (strcmp(res->name, label_id->id) == 0)
@@ -747,8 +755,9 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
* need to check this same resource again
*/
goto retry;
- } else
+ } else {
return 0;
+ }
}
/*
@@ -1115,14 +1124,18 @@ resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return resource_size(&nspm->nsio.res);
- } else if (is_namespace_blk(dev)) {
+ }
+
+ if (is_namespace_blk(dev))
return nd_namespace_blk_size(to_nd_namespace_blk(dev));
- } else if (is_namespace_io(dev)) {
+
+ if (is_namespace_io(dev)) {
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
return resource_size(&nsio->res);
- } else
- WARN_ONCE(1, "unknown namespace type\n");
+ }
+
+ WARN_ONCE(1, "unknown namespace type\n");
return 0;
}
@@ -1172,12 +1185,14 @@ static u8 *namespace_to_uuid(struct device *dev)
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nspm->uuid;
- } else if (is_namespace_blk(dev)) {
+ }
+ if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
return nsblk->uuid;
- } else
- return ERR_PTR(-ENXIO);
+ }
+
+ return ERR_PTR(-ENXIO);
}
static ssize_t uuid_show(struct device *dev,
@@ -1282,8 +1297,9 @@ static ssize_t uuid_store(struct device *dev,
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
ns_uuid = &nsblk->uuid;
- } else
+ } else {
return -ENXIO;
+ }
nd_device_lock(dev);
nvdimm_bus_lock(dev);
@@ -1320,8 +1336,9 @@ static ssize_t resource_show(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
res = &nsio->res;
- } else
+ } else {
return -ENXIO;
+ }
/* no address to convey if the namespace has no allocation */
if (resource_size(res) == 0)
@@ -1372,8 +1389,9 @@ static ssize_t sector_size_store(struct device *dev,
lbasize = &nspm->lbasize;
supported = pmem_lbasize_supported;
- } else
+ } else {
return -ENXIO;
+ }
nd_device_lock(dev);
nvdimm_bus_lock(dev);
@@ -1454,9 +1472,9 @@ static int btt_claim_class(struct device *dev)
}
nsindex = to_namespace_index(ndd, ndd->ns_current);
- if (nsindex == NULL)
+ if (nsindex == NULL) {
loop_bitmask |= 1;
- else {
+ } else {
/* check whether existing labels are v1.1 or v1.2 */
if (__le16_to_cpu(nsindex->major) == 1 &&
__le16_to_cpu(nsindex->minor) == 1)
@@ -1883,9 +1901,9 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
pmem_start = __le64_to_cpu(nd_label->dpa);
pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
if (pmem_start >= hw_start && pmem_start < hw_end &&
- pmem_end <= hw_end && pmem_end > hw_start)
+ pmem_end <= hw_end && pmem_end > hw_start) {
/* pass */;
- else {
+ } else {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
dev_name(ndd->dev), nd_label->uuid);
return -EINVAL;
@@ -2335,9 +2353,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
kfree(devs);
devs = __devs;
- if (is_nd_blk(&nd_region->dev))
+ if (is_nd_blk(&nd_region->dev)) {
dev = create_namespace_blk(nd_region, nd_label, count);
- else {
+ } else {
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct nd_namespace_index *nsindex;
@@ -2356,8 +2374,9 @@ static struct device **scan_labels(struct nd_region *nd_region)
default:
goto err;
}
- } else
+ } else {
devs[count++] = dev;
+ }
}
dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
@@ -2576,8 +2595,9 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
id = ida_simple_get(&nd_region->ns_ida, 0, 0,
GFP_KERNEL);
nspm->id = id;
- } else
+ } else {
id = i;
+ }
if (id < 0)
break;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 20a0cce9ee93..7226d6d95899 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -69,22 +69,23 @@ static ssize_t mode_store(struct device *dev,
nd_device_lock(dev);
nvdimm_bus_lock(dev);
- if (dev->driver)
+ if (dev->driver) {
rc = -EBUSY;
- else {
+ } else {
size_t n = len - 1;
if (strncmp(buf, "pmem\n", n) == 0 ||
strncmp(buf, "pmem", n) == 0) {
nd_pfn->mode = PFN_MODE_PMEM;
} else if (strncmp(buf, "ram\n", n) == 0 ||
- strncmp(buf, "ram", n) == 0)
+ strncmp(buf, "ram", n) == 0) {
nd_pfn->mode = PFN_MODE_RAM;
- else if (strncmp(buf, "none\n", n) == 0 ||
- strncmp(buf, "none", n) == 0)
+ } else if (strncmp(buf, "none\n", n) == 0 ||
+ strncmp(buf, "none", n) == 0) {
nd_pfn->mode = PFN_MODE_NONE;
- else
+ } else {
rc = -EINVAL;
+ }
}
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
buf[len - 1] == '\n' ? "" : "\n");
@@ -575,8 +576,9 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns)
if (rc < 0) {
nd_detach_ndns(pfn_dev, &nd_pfn->ndns);
put_device(pfn_dev);
- } else
+ } else {
__nd_device_register(pfn_dev);
+ }
return rc;
}
@@ -643,8 +645,9 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
pgmap->flags |= PGMAP_ALTMAP_VALID;
- } else
+ } else {
return -ENXIO;
+ }
return 0;
}
@@ -706,10 +709,11 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
* PMD_SIZE for most architectures.
*/
offset = ALIGN(start + SZ_8K + 64 * npfns, align) - start;
- } else if (nd_pfn->mode == PFN_MODE_RAM)
+ } else if (nd_pfn->mode == PFN_MODE_RAM) {
offset = ALIGN(start + SZ_8K, align) - start;
- else
+ } else {
return -ENXIO;
+ }
if (offset >= size) {
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index dfe38d6b6607..64e7429edcc2 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -150,9 +150,9 @@ static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
bad_pmem = true;
if (!op_is_write(op)) {
- if (unlikely(bad_pmem))
+ if (unlikely(bad_pmem)) {
rc = BLK_STS_IOERR;
- else {
+ } else {
rc = read_pmem(page, off, pmem_addr, len);
flush_dcache_page(page);
}
@@ -519,9 +519,9 @@ static int nd_pmem_remove(struct device *dev)
{
struct pmem_device *pmem = dev_get_drvdata(dev);
- if (is_nd_btt(dev))
+ if (is_nd_btt(dev)) {
nvdimm_namespace_detach_btt(to_nd_btt(dev));
- else {
+ } else {
/*
* Note, this assumes nd_device_lock() context to not
* race nd_pmem_notify()
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 76b08b64b0b1..16dfdbdbf1c8 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -381,8 +381,9 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
blk_max_overlap = overlap;
goto retry;
}
- } else if (is_nd_blk(&nd_region->dev))
+ } else if (is_nd_blk(&nd_region->dev)) {
available += nd_blk_available_dpa(nd_region);
+ }
}
return available;
@@ -956,8 +957,9 @@ unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
ndl_lock = per_cpu_ptr(nd_region->lane, lane);
if (ndl_count->count++ == 0)
spin_lock(&ndl_lock->lock);
- } else
+ } else {
lane = cpu;
+ }
return lane;
}
@@ -1132,9 +1134,9 @@ int nvdimm_flush(struct nd_region *nd_region, struct bio *bio)
{
int rc = 0;
- if (!nd_region->flush)
+ if (!nd_region->flush) {
rc = generic_nvdimm_flush(nd_region);
- else {
+ } else {
if (nd_region->flush(nd_region, bio))
rc = -EIO;
}
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 13bc5d54f0b6..693416001d17 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -194,8 +194,9 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
return 0;
return nvdimm_key_revalidate(nvdimm);
- } else
- data = nvdimm_get_key_payload(nvdimm, &key);
+ }
+
+ data = nvdimm_get_key_payload(nvdimm, &key);
rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
@@ -544,8 +545,9 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
return -EBUSY;
}
rc = security_overwrite(nvdimm, key);
- } else
+ } else {
return -EINVAL;
+ }
if (rc == 0)
rc = len;
--
2.15.0
Kernel code keeps the assignment operator on the first line when a
statement is split across multiple lines.
Move 2 unusual uses.
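e.g. (hypothetical identifiers, illustration only):
	foo->bar
		= baz(qux);
is typically written
	foo->bar =
		baz(qux);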
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/namespace_devs.c | 8 ++++----
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 70e1d752c12c..8c75ef84bad7 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -2023,8 +2023,8 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
nspm->lbasize = __le64_to_cpu(label0->lbasize);
ndd = to_ndd(nd_mapping);
if (namespace_label_has(ndd, abstraction_guid))
- nspm->nsio.common.claim_class
- = to_nvdimm_cclass(&label0->abstraction_guid);
+ nspm->nsio.common.claim_class =
+ to_nvdimm_cclass(&label0->abstraction_guid);
}
if (!nspm->alt_name || !nspm->uuid) {
@@ -2267,8 +2267,8 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
GFP_KERNEL);
if (namespace_label_has(ndd, abstraction_guid))
- nsblk->common.claim_class
- = to_nvdimm_cclass(&nd_label->abstraction_guid);
+ nsblk->common.claim_class =
+ to_nvdimm_cclass(&nd_label->abstraction_guid);
if (!nsblk->uuid)
goto blk_err;
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
--
2.15.0
Avoid using uncommon logic testing styles to make the code a
bit more like other kernel code.
e.g.:
if (foo) {
;
} else {
<code>
}
is typically written
if (!foo) {
<code>
}
Also put the bare semicolon before the comment rather than after it
e.g.:
if (foo) {
/* comment */;
} else if (bar) {
<code>
} else {
baz;
}
is typically written
if (foo) {
; /* comment */
} else if (bar) {
<code>
} else {
baz;
}
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/claim.c | 4 +---
drivers/nvdimm/dimm_devs.c | 11 ++++------
drivers/nvdimm/label.c | 4 +---
drivers/nvdimm/namespace_devs.c | 46 +++++++++++++++++++----------------------
drivers/nvdimm/region_devs.c | 4 +---
5 files changed, 28 insertions(+), 41 deletions(-)
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 3732925aadb8..244631f5308c 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -149,9 +149,7 @@ ssize_t nd_namespace_store(struct device *dev,
return -ENOMEM;
strim(name);
- if (strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0) {
- /* pass */;
- } else {
+ if (!(strncmp(name, "namespace", 9) == 0 || strcmp(name, "") == 0)) {
len = -EINVAL;
goto out;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 4df85dd72682..cac62bb726bb 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -593,13 +593,10 @@ int alias_dpa_busy(struct device *dev, void *data)
* looking to validate against PMEM aliasing collision rules
* (i.e. BLK is allocated after all aliased PMEM).
*/
- if (info->res) {
- if (info->res->start >= nd_mapping->start &&
- info->res->start < map_end)
- /* pass */;
- else
- return 0;
- }
+ if (info->res &&
+ (info->res->start < nd_mapping->start ||
+ info->res->start >= map_end))
+ return 0;
retry:
/*
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index e4632dbebead..ae466c6faa90 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -1180,9 +1180,7 @@ static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
mutex_unlock(&nd_mapping->lock);
}
- if (ndd->ns_current == -1 || ndd->ns_next == -1)
- /* pass */;
- else
+ if (ndd->ns_current != -1 && ndd->ns_next != -1)
return max(num_labels, old_num_labels);
nsindex = to_namespace_index(ndd, 0);
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 8c75ef84bad7..7a16340f9853 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -162,7 +162,7 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
nspm = to_nd_namespace_pmem(&ndns->dev);
if (nspm->lbasize == 0 || nspm->lbasize == 512)
- /* default */;
+ ; /* default */
else if (nspm->lbasize == 4096)
return 4096;
else
@@ -387,7 +387,7 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
resource_size_t size = resource_size(&nspm->nsio.res);
if (size == 0 && nspm->uuid)
- /* delete allocation */;
+ ; /* delete allocation */
else if (!nspm->uuid)
return 0;
@@ -398,7 +398,7 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
resource_size_t size = nd_namespace_blk_size(nsblk);
if (size == 0 && nsblk->uuid)
- /* delete allocation */;
+ ; /* delete allocation */
else if (!nsblk->uuid || !nsblk->lbasize)
return 0;
@@ -1900,10 +1900,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
hw_end = hw_start + nd_mapping->size;
pmem_start = __le64_to_cpu(nd_label->dpa);
pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
- if (pmem_start >= hw_start && pmem_start < hw_end &&
- pmem_end <= hw_end && pmem_end > hw_start) {
- /* pass */;
- } else {
+ if (!(pmem_start >= hw_start && pmem_start < hw_end &&
+ pmem_end <= hw_end && pmem_end > hw_start)) {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
dev_name(ndd->dev), nd_label->uuid);
return -EINVAL;
@@ -2326,15 +2324,12 @@ static struct device **scan_labels(struct nd_region *nd_region)
list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
struct nd_namespace_label *nd_label = label_ent->label;
struct device **__devs;
- u32 flags;
+ bool localflag;
if (!nd_label)
continue;
- flags = __le32_to_cpu(nd_label->flags);
- if (is_nd_blk(&nd_region->dev)
- == !!(flags & NSLABEL_FLAG_LOCAL))
- /* pass, region matches label type */;
- else
+ localflag = __le32_to_cpu(nd_label->flags) & NSLABEL_FLAG_LOCAL;
+ if (is_nd_blk(&nd_region->dev) != localflag)
continue;
/* skip labels that describe extents outside of the region */
@@ -2494,19 +2489,20 @@ static int init_active_labels(struct nd_region *nd_region)
* the region from being activated.
*/
if (!ndd) {
- if (test_bit(NDD_LOCKED, &nvdimm->flags))
- /* fail, label data may be unreadable */;
- else if (test_bit(NDD_ALIASING, &nvdimm->flags))
- /* fail, labels needed to disambiguate dpa */;
- else
- return 0;
-
- dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
- dev_name(&nd_mapping->nvdimm->dev),
- test_bit(NDD_LOCKED, &nvdimm->flags)
- ? "locked" : "disabled");
- return -ENXIO;
+ if (test_bit(NDD_LOCKED, &nvdimm->flags) ||
+ /* label data may be unreadable */
+ test_bit(NDD_ALIASING, &nvdimm->flags)) {
+ /* labels needed to disambiguate dpa */
+
+ dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
+ dev_name(&nd_mapping->nvdimm->dev),
+ test_bit(NDD_LOCKED, &nvdimm->flags)
+ ? "locked" : "disabled");
+ return -ENXIO;
+ }
+ return 0;
}
+
nd_mapping->ndd = ndd;
atomic_inc(&nvdimm->busy);
get_ndd(ndd);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 65df07481909..6861e0997d21 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -320,9 +320,7 @@ static ssize_t set_cookie_show(struct device *dev,
struct nd_interleave_set *nd_set = nd_region->nd_set;
ssize_t rc = 0;
- if (is_memory(dev) && nd_set)
- /* pass, should be precluded by region_visible */;
- else
+ if (!(is_memory(dev) && nd_set))
return -ENXIO;
/*
--
2.15.0
Use tab alignment to make the macro definitions a bit more intelligible.
Use the BIT and BIT_ULL macros.
Convert MAP_LBA_MASK to use the already defined MAP_TRIM_MASK and
MAP_ERR_MASK.
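For reference, BIT() and BIT_ULL() are the generic macros from
include/linux/bits.h, which expand roughly as:
	#define BIT(nr)		(1UL << (nr))
	#define BIT_ULL(nr)	(1ULL << (nr))
so the converted definitions keep their existing values.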
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/btt.h | 54 ++++++++++++++++++++++++++--------------------------
1 file changed, 27 insertions(+), 27 deletions(-)
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 1da76da3e159..fb0f4546153f 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -10,34 +10,34 @@
#include <linux/badblocks.h>
#include <linux/types.h>
-#define BTT_SIG_LEN 16
-#define BTT_SIG "BTT_ARENA_INFO\0"
-#define MAP_ENT_SIZE 4
-#define MAP_TRIM_SHIFT 31
-#define MAP_TRIM_MASK (1 << MAP_TRIM_SHIFT)
-#define MAP_ERR_SHIFT 30
-#define MAP_ERR_MASK (1 << MAP_ERR_SHIFT)
-#define MAP_LBA_MASK (~((1 << MAP_TRIM_SHIFT) | (1 << MAP_ERR_SHIFT)))
-#define MAP_ENT_NORMAL 0xC0000000
-#define LOG_GRP_SIZE sizeof(struct log_group)
-#define LOG_ENT_SIZE sizeof(struct log_entry)
-#define ARENA_MIN_SIZE (1UL << 24) /* 16 MB */
-#define ARENA_MAX_SIZE (1ULL << 39) /* 512 GB */
-#define RTT_VALID (1UL << 31)
-#define RTT_INVALID 0
-#define BTT_PG_SIZE 4096
-#define BTT_DEFAULT_NFREE ND_MAX_LANES
-#define LOG_SEQ_INIT 1
-
-#define IB_FLAG_ERROR 0x00000001
-#define IB_FLAG_ERROR_MASK 0x00000001
-
-#define ent_lba(ent) (ent & MAP_LBA_MASK)
-#define ent_e_flag(ent) (!!(ent & MAP_ERR_MASK))
-#define ent_z_flag(ent) (!!(ent & MAP_TRIM_MASK))
-#define set_e_flag(ent) (ent |= MAP_ERR_MASK)
+#define BTT_SIG_LEN 16
+#define BTT_SIG "BTT_ARENA_INFO\0"
+#define MAP_ENT_SIZE 4
+#define MAP_TRIM_SHIFT 31
+#define MAP_TRIM_MASK BIT(MAP_TRIM_SHIFT)
+#define MAP_ERR_SHIFT 30
+#define MAP_ERR_MASK BIT(MAP_ERR_SHIFT)
+#define MAP_LBA_MASK (~(MAP_TRIM_MASK | MAP_ERR_MASK))
+#define MAP_ENT_NORMAL 0xC0000000
+#define LOG_GRP_SIZE sizeof(struct log_group)
+#define LOG_ENT_SIZE sizeof(struct log_entry)
+#define ARENA_MIN_SIZE BIT(24) /* 16 MB */
+#define ARENA_MAX_SIZE BIT_ULL(39) /* 512 GB */
+#define RTT_VALID BIT(31)
+#define RTT_INVALID 0
+#define BTT_PG_SIZE 4096
+#define BTT_DEFAULT_NFREE ND_MAX_LANES
+#define LOG_SEQ_INIT 1
+
+#define IB_FLAG_ERROR 0x00000001
+#define IB_FLAG_ERROR_MASK 0x00000001
+
+#define ent_lba(ent) ((ent) & MAP_LBA_MASK)
+#define ent_e_flag(ent) (!!((ent) & MAP_ERR_MASK))
+#define ent_z_flag(ent) (!!((ent) & MAP_TRIM_MASK))
+#define set_e_flag(ent) ((ent) |= MAP_ERR_MASK)
/* 'normal' is both e and z flags set */
-#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
+#define ent_normal(ent) (ent_e_flag(ent) && ent_z_flag(ent))
enum btt_init_state {
INIT_UNCHECKED = 0,
--
2.15.0
Make the nvdimm code more like the rest of the kernel.
Avoid indented labels, and spaces where tabs should be used.
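e.g. (hypothetical label and statement; only the leading space before
the label changes):
 out:
	kfree(buf);
is typically written
out:
	kfree(buf);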
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/btt.c | 2 +-
drivers/nvdimm/region_devs.c | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 39851edc2cc5..0df4461fe607 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -1320,7 +1320,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
u32 cur_len;
int e_flag;
- retry:
+retry:
lane = nd_region_acquire_lane(btt->nd_region);
ret = lba_to_arena(btt, sector, &premap, &arena);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 16dfdbdbf1c8..65df07481909 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -1044,7 +1044,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if (!nd_region->lane)
goto err_percpu;
- for (i = 0; i < nr_cpu_ids; i++) {
+ for (i = 0; i < nr_cpu_ids; i++) {
struct nd_percpu_lane *ndl;
ndl = per_cpu_ptr(nd_region->lane, i);
--
2.15.0
Convert the files to a more common kernel whitespace style to make
them more like other kernel files.
git diff -w shows no difference.
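e.g. continuation lines of a multi-line argument list are typically
aligned with the open parenthesis (hypothetical declaration, shown only
to illustrate the intent):
	static int foo_init(struct foo_dev *foo,
			    unsigned long flags);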
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/badrange.c | 18 +--
drivers/nvdimm/blk.c | 26 ++--
drivers/nvdimm/btt.c | 192 +++++++++++++--------------
drivers/nvdimm/btt.h | 2 +-
drivers/nvdimm/btt_devs.c | 42 +++---
drivers/nvdimm/bus.c | 100 +++++++-------
drivers/nvdimm/claim.c | 36 +++---
drivers/nvdimm/core.c | 36 +++---
drivers/nvdimm/dimm.c | 2 +-
drivers/nvdimm/dimm_devs.c | 68 +++++-----
drivers/nvdimm/e820.c | 2 +-
drivers/nvdimm/label.c | 146 ++++++++++-----------
drivers/nvdimm/label.h | 4 +-
drivers/nvdimm/namespace_devs.c | 280 ++++++++++++++++++++--------------------
drivers/nvdimm/nd-core.h | 26 ++--
drivers/nvdimm/nd.h | 66 +++++-----
drivers/nvdimm/nd_virtio.c | 18 +--
drivers/nvdimm/of_pmem.c | 6 +-
drivers/nvdimm/pfn_devs.c | 86 ++++++------
drivers/nvdimm/pmem.c | 44 +++----
drivers/nvdimm/pmem.h | 2 +-
drivers/nvdimm/region.c | 16 +--
drivers/nvdimm/region_devs.c | 122 ++++++++---------
drivers/nvdimm/security.c | 84 ++++++------
drivers/nvdimm/virtio_pmem.c | 8 +-
25 files changed, 716 insertions(+), 716 deletions(-)
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index b9eeefa27e3a..b997c2007b83 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -24,7 +24,7 @@ void badrange_init(struct badrange *badrange)
EXPORT_SYMBOL_GPL(badrange_init);
static void append_badrange_entry(struct badrange *badrange,
- struct badrange_entry *bre, u64 addr, u64 length)
+ struct badrange_entry *bre, u64 addr, u64 length)
{
lockdep_assert_held(&badrange->lock);
bre->start = addr;
@@ -33,7 +33,7 @@ static void append_badrange_entry(struct badrange *badrange,
}
static int alloc_and_append_badrange_entry(struct badrange *badrange,
- u64 addr, u64 length, gfp_t flags)
+ u64 addr, u64 length, gfp_t flags)
{
struct badrange_entry *bre;
@@ -99,7 +99,7 @@ int badrange_add(struct badrange *badrange, u64 addr, u64 length)
EXPORT_SYMBOL_GPL(badrange_add);
void badrange_forget(struct badrange *badrange, phys_addr_t start,
- unsigned int len)
+ unsigned int len)
{
struct list_head *badrange_list = &badrange->list;
u64 clr_end = start + len - 1;
@@ -152,7 +152,7 @@ void badrange_forget(struct badrange *badrange, phys_addr_t start,
/* Add new entry covering the right half */
alloc_and_append_badrange_entry(badrange, new_start,
- new_len, GFP_NOWAIT);
+ new_len, GFP_NOWAIT);
/* Adjust this entry to cover the left half */
bre->length = start - bre->start;
continue;
@@ -165,11 +165,11 @@ EXPORT_SYMBOL_GPL(badrange_forget);
static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
dev_dbg(bb->dev, "Found a bad range (0x%llx, 0x%llx)\n",
- (u64) s * 512, (u64) num * 512);
+ (u64) s * 512, (u64) num * 512);
/* this isn't an error as the hardware will still throw an exception */
if (badblocks_set(bb, s, num, 1))
dev_info_once(bb->dev, "%s: failed for sector %llx\n",
- __func__, (u64) s);
+ __func__, (u64) s);
}
/**
@@ -211,7 +211,7 @@ static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
}
static void badblocks_populate(struct badrange *badrange,
- struct badblocks *bb, const struct resource *res)
+ struct badblocks *bb, const struct resource *res)
{
struct badrange_entry *bre;
@@ -267,13 +267,13 @@ static void badblocks_populate(struct badrange *badrange,
* and add badblocks entries for all matching sub-ranges
*/
void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res)
+ struct badblocks *bb, const struct resource *res)
{
struct nvdimm_bus *nvdimm_bus;
if (!is_memory(&nd_region->dev)) {
dev_WARN_ONCE(&nd_region->dev, 1,
- "%s only valid for pmem regions\n", __func__);
+ "%s only valid for pmem regions\n", __func__);
return;
}
nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index 677d6f45b5c4..edd3e1664edc 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -29,7 +29,7 @@ static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
}
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
- resource_size_t ns_offset, unsigned int len)
+ resource_size_t ns_offset, unsigned int len)
{
int i;
@@ -37,7 +37,7 @@ static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
if (ns_offset < resource_size(nsblk->res[i])) {
if (ns_offset + len > resource_size(nsblk->res[i])) {
dev_WARN_ONCE(&nsblk->common.dev, 1,
- "illegal request\n");
+ "illegal request\n");
return SIZE_MAX;
}
return nsblk->res[i]->start + ns_offset;
@@ -61,7 +61,7 @@ static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
- struct bio_integrity_payload *bip, u64 lba, int rw)
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
unsigned int len = nsblk_meta_size(nsblk);
@@ -91,7 +91,7 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
cur_len = min(len, bv.bv_len);
iobuf = kmap_atomic(bv.bv_page);
err = ndbr->do_io(ndbr, dev_offset, iobuf + bv.bv_offset,
- cur_len, rw);
+ cur_len, rw);
kunmap_atomic(iobuf);
if (err)
return err;
@@ -107,15 +107,15 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
- struct bio_integrity_payload *bip, u64 lba, int rw)
+ struct bio_integrity_payload *bip, u64 lba, int rw)
{
return 0;
}
#endif
static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
- struct bio_integrity_payload *bip, struct page *page,
- unsigned int len, unsigned int off, int rw, sector_t sector)
+ struct bio_integrity_payload *bip, struct page *page,
+ unsigned int len, unsigned int off, int rw, sector_t sector)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset, ns_offset;
@@ -184,12 +184,12 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
BUG_ON(len > PAGE_SIZE);
err = nsblk_do_bvec(nsblk, bip, bvec.bv_page, len,
- bvec.bv_offset, rw, iter.bi_sector);
+ bvec.bv_offset, rw, iter.bi_sector);
if (err) {
dev_dbg(&nsblk->common.dev,
- "io error in %s sector %lld, len %d,\n",
- (rw == READ) ? "READ" : "WRITE",
- (unsigned long long) iter.bi_sector, len);
+ "io error in %s sector %lld, len %d,\n",
+ (rw == READ) ? "READ" : "WRITE",
+ (unsigned long long) iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
@@ -202,8 +202,8 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
}
static int nsblk_rw_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *iobuf, size_t n, int rw,
- unsigned long flags)
+ resource_size_t offset, void *iobuf, size_t n, int rw,
+ unsigned long flags)
{
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(&ndns->dev);
struct nd_blk_region *ndbr = to_ndbr(nsblk);
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index a8d56887ec88..d3e187ac43eb 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -35,7 +35,7 @@ static u64 adjust_initial_offset(struct nd_btt *nd_btt, u64 offset)
}
static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
- void *buf, size_t n, unsigned long flags)
+ void *buf, size_t n, unsigned long flags)
{
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
@@ -46,7 +46,7 @@ static int arena_read_bytes(struct arena_info *arena, resource_size_t offset,
}
static int arena_write_bytes(struct arena_info *arena, resource_size_t offset,
- void *buf, size_t n, unsigned long flags)
+ void *buf, size_t n, unsigned long flags)
{
struct nd_btt *nd_btt = arena->nd_btt;
struct nd_namespace_common *ndns = nd_btt->ndns;
@@ -66,23 +66,23 @@ static int btt_info_write(struct arena_info *arena, struct btt_sb *super)
* correctly, so make sure that is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->infooff, 512),
- "arena->infooff: %#llx is unaligned\n", arena->infooff);
+ "arena->infooff: %#llx is unaligned\n", arena->infooff);
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->info2off, 512),
- "arena->info2off: %#llx is unaligned\n", arena->info2off);
+ "arena->info2off: %#llx is unaligned\n", arena->info2off);
ret = arena_write_bytes(arena, arena->info2off, super,
- sizeof(struct btt_sb), 0);
+ sizeof(struct btt_sb), 0);
if (ret)
return ret;
return arena_write_bytes(arena, arena->infooff, super,
- sizeof(struct btt_sb), 0);
+ sizeof(struct btt_sb), 0);
}
static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
{
return arena_read_bytes(arena, arena->infooff, super,
- sizeof(struct btt_sb), 0);
+ sizeof(struct btt_sb), 0);
}
/*
@@ -92,19 +92,19 @@ static int btt_info_read(struct arena_info *arena, struct btt_sb *super)
* mapping contains 'E' and 'Z' flags as desired
*/
static int __btt_map_write(struct arena_info *arena, u32 lba, __le32 mapping,
- unsigned long flags)
+ unsigned long flags)
{
u64 ns_off = arena->mapoff + (lba * MAP_ENT_SIZE);
if (unlikely(lba >= arena->external_nlba))
dev_err_ratelimited(to_dev(arena),
- "%s: lba %#x out of range (max: %#x)\n",
- __func__, lba, arena->external_nlba);
+ "%s: lba %#x out of range (max: %#x)\n",
+ __func__, lba, arena->external_nlba);
return arena_write_bytes(arena, ns_off, &mapping, MAP_ENT_SIZE, flags);
}
static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
- u32 z_flag, u32 e_flag, unsigned long rwb_flags)
+ u32 z_flag, u32 e_flag, unsigned long rwb_flags)
{
u32 ze;
__le32 mapping_le;
@@ -139,7 +139,7 @@ static int btt_map_write(struct arena_info *arena, u32 lba, u32 mapping,
* to avoid confusion
*/
dev_err_ratelimited(to_dev(arena),
- "Invalid use of Z and E flags\n");
+ "Invalid use of Z and E flags\n");
return -EIO;
}
@@ -157,8 +157,8 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
if (unlikely(lba >= arena->external_nlba))
dev_err_ratelimited(to_dev(arena),
- "%s: lba %#x out of range (max: %#x)\n",
- __func__, lba, arena->external_nlba);
+ "%s: lba %#x out of range (max: %#x)\n",
+ __func__, lba, arena->external_nlba);
ret = arena_read_bytes(arena, ns_off, &in, MAP_ENT_SIZE, rwb_flags);
if (ret)
@@ -204,17 +204,17 @@ static int btt_map_read(struct arena_info *arena, u32 lba, u32 *mapping,
}
static int btt_log_group_read(struct arena_info *arena, u32 lane,
- struct log_group *log)
+ struct log_group *log)
{
return arena_read_bytes(arena,
- arena->logoff + (lane * LOG_GRP_SIZE), log,
- LOG_GRP_SIZE, 0);
+ arena->logoff + (lane * LOG_GRP_SIZE), log,
+ LOG_GRP_SIZE, 0);
}
static struct dentry *debugfs_root;
static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
- int idx)
+ int idx)
{
char dirname[32];
struct dentry *d;
@@ -231,13 +231,13 @@ static void arena_debugfs_init(struct arena_info *a, struct dentry *parent,
debugfs_create_x64("size", S_IRUGO, d, &a->size);
debugfs_create_x64("external_lba_start", S_IRUGO, d,
- &a->external_lba_start);
+ &a->external_lba_start);
debugfs_create_x32("internal_nlba", S_IRUGO, d, &a->internal_nlba);
debugfs_create_u32("internal_lbasize", S_IRUGO, d,
- &a->internal_lbasize);
+ &a->internal_lbasize);
debugfs_create_x32("external_nlba", S_IRUGO, d, &a->external_nlba);
debugfs_create_u32("external_lbasize", S_IRUGO, d,
- &a->external_lbasize);
+ &a->external_lbasize);
debugfs_create_u32("nfree", S_IRUGO, d, &a->nfree);
debugfs_create_u16("version_major", S_IRUGO, d, &a->version_major);
debugfs_create_u16("version_minor", S_IRUGO, d, &a->version_minor);
@@ -258,7 +258,7 @@ static void btt_debugfs_init(struct btt *btt)
struct arena_info *arena;
btt->debugfs_dir = debugfs_create_dir(dev_name(&btt->nd_btt->dev),
- debugfs_root);
+ debugfs_root);
if (IS_ERR_OR_NULL(btt->debugfs_dir))
return;
@@ -338,9 +338,9 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
old_ent = btt_log_get_old(arena, &log);
if (old_ent < 0 || old_ent > 1) {
dev_err(to_dev(arena),
- "log corruption (%d): lane %d seq [%d, %d]\n",
- old_ent, lane, log.ent[arena->log_index[0]].seq,
- log.ent[arena->log_index[1]].seq);
+ "log corruption (%d): lane %d seq [%d, %d]\n",
+ old_ent, lane, log.ent[arena->log_index[0]].seq,
+ log.ent[arena->log_index[1]].seq);
/* TODO set error state? */
return -EIO;
}
@@ -359,7 +359,7 @@ static int btt_log_read(struct arena_info *arena, u32 lane,
* btt_flog_write is the wrapper for updating the freelist elements
*/
static int __btt_log_write(struct arena_info *arena, u32 lane,
- u32 sub, struct log_entry *ent, unsigned long flags)
+ u32 sub, struct log_entry *ent, unsigned long flags)
{
int ret;
u32 group_slot = arena->log_index[sub];
@@ -380,7 +380,7 @@ static int __btt_log_write(struct arena_info *arena, u32 lane,
}
static int btt_flog_write(struct arena_info *arena, u32 lane, u32 sub,
- struct log_entry *ent)
+ struct log_entry *ent)
{
int ret;
@@ -421,15 +421,15 @@ static int btt_map_init(struct arena_info *arena)
* is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->mapoff, 512),
- "arena->mapoff: %#llx is unaligned\n", arena->mapoff);
+ "arena->mapoff: %#llx is unaligned\n", arena->mapoff);
while (mapsize) {
size_t size = min(mapsize, chunk_size);
dev_WARN_ONCE(to_dev(arena), size < 512,
- "chunk size: %#zx is unaligned\n", size);
+ "chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->mapoff + offset, zerobuf,
- size, 0);
+ size, 0);
if (ret)
goto free;
@@ -438,7 +438,7 @@ static int btt_map_init(struct arena_info *arena)
cond_resched();
}
- free:
+free:
kfree(zerobuf);
return ret;
}
@@ -465,15 +465,15 @@ static int btt_log_init(struct arena_info *arena)
* is the case.
*/
dev_WARN_ONCE(to_dev(arena), !IS_ALIGNED(arena->logoff, 512),
- "arena->logoff: %#llx is unaligned\n", arena->logoff);
+ "arena->logoff: %#llx is unaligned\n", arena->logoff);
while (logsize) {
size_t size = min(logsize, chunk_size);
dev_WARN_ONCE(to_dev(arena), size < 512,
- "chunk size: %#zx is unaligned\n", size);
+ "chunk size: %#zx is unaligned\n", size);
ret = arena_write_bytes(arena, arena->logoff + offset, zerobuf,
- size, 0);
+ size, 0);
if (ret)
goto free;
@@ -492,7 +492,7 @@ static int btt_log_init(struct arena_info *arena)
goto free;
}
- free:
+free:
kfree(zerobuf);
return ret;
}
@@ -518,7 +518,7 @@ static int arena_clear_freelist_error(struct arena_info *arena, u32 lane)
unsigned long chunk = min(len, PAGE_SIZE);
ret = arena_write_bytes(arena, nsoff, zero_page,
- chunk, 0);
+ chunk, 0);
if (ret)
break;
len -= chunk;
@@ -538,7 +538,7 @@ static int btt_freelist_init(struct arena_info *arena)
u32 i, map_entry, log_oldmap, log_newmap;
arena->freelist = kcalloc(arena->nfree, sizeof(struct free_entry),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!arena->freelist)
return -ENOMEM;
@@ -561,12 +561,12 @@ static int btt_freelist_init(struct arena_info *arena)
* the BTT read-only
*/
if (ent_e_flag(log_new.old_map) &&
- !ent_normal(log_new.old_map)) {
+ !ent_normal(log_new.old_map)) {
arena->freelist[i].has_err = 1;
ret = arena_clear_freelist_error(arena, i);
if (ret)
dev_err_ratelimited(to_dev(arena),
- "Unable to clear known errors\n");
+ "Unable to clear known errors\n");
}
/* This implies a newly created or untouched flog entry */
@@ -575,7 +575,7 @@ static int btt_freelist_init(struct arena_info *arena)
/* Check if map recovery is needed */
ret = btt_map_read(arena, le32_to_cpu(log_new.lba), &map_entry,
- NULL, NULL, 0);
+ NULL, NULL, 0);
if (ret)
return ret;
@@ -592,7 +592,7 @@ static int btt_freelist_init(struct arena_info *arena)
* to complete the map write. So fix up the map.
*/
ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
- le32_to_cpu(log_new.new_map), 0, 0, 0);
+ le32_to_cpu(log_new.new_map), 0, 0, 0);
if (ret)
return ret;
}
@@ -641,7 +641,7 @@ static int log_set_indices(struct arena_info *arena)
} else {
/* Skip if index has been recorded */
if ((next_idx == 1) &&
- (j == log_index[0]))
+ (j == log_index[0]))
continue;
/* valid entry, record index */
log_index[next_idx] = j;
@@ -732,7 +732,7 @@ static int btt_maplocks_init(struct arena_info *arena)
u32 i;
arena->map_locks = kcalloc(arena->nfree, sizeof(struct aligned_lock),
- GFP_KERNEL);
+ GFP_KERNEL);
if (!arena->map_locks)
return -ENOMEM;
@@ -743,7 +743,7 @@ static int btt_maplocks_init(struct arena_info *arena)
}
static struct arena_info *alloc_arena(struct btt *btt, size_t size,
- size_t start, size_t arena_off)
+ size_t start, size_t arena_off)
{
struct arena_info *arena;
u64 logsize, mapsize, datasize;
@@ -763,7 +763,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
arena->external_lba_start = start;
arena->external_lbasize = btt->lbasize;
arena->internal_lbasize = roundup(arena->external_lbasize,
- INT_LBASIZE_ALIGNMENT);
+ INT_LBASIZE_ALIGNMENT);
arena->nfree = BTT_DEFAULT_NFREE;
arena->version_major = btt->nd_btt->version_major;
arena->version_minor = btt->nd_btt->version_minor;
@@ -780,7 +780,7 @@ static struct arena_info *alloc_arena(struct btt *btt, size_t size,
/* Calculate optimal split between map and data area */
arena->internal_nlba = div_u64(available - BTT_PG_SIZE,
- arena->internal_lbasize + MAP_ENT_SIZE);
+ arena->internal_lbasize + MAP_ENT_SIZE);
arena->external_nlba = arena->internal_nlba - arena->nfree;
mapsize = roundup((arena->external_nlba * MAP_ENT_SIZE), BTT_PG_SIZE);
@@ -818,7 +818,7 @@ static void free_arenas(struct btt *btt)
* populates the corresponding arena_info struct
*/
static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
- u64 arena_off)
+ u64 arena_off)
{
arena->internal_nlba = le32_to_cpu(super->internal_nlba);
arena->internal_lbasize = le32_to_cpu(super->internal_lbasize);
@@ -829,7 +829,7 @@ static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
arena->version_minor = le16_to_cpu(super->version_minor);
arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
- le64_to_cpu(super->nextoff));
+ le64_to_cpu(super->nextoff));
arena->infooff = arena_off;
arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
@@ -877,7 +877,7 @@ static int discover_arenas(struct btt *btt)
goto out;
} else {
dev_err(to_dev(arena),
- "Found corrupted metadata!\n");
+ "Found corrupted metadata!\n");
ret = -ENODEV;
goto out;
}
@@ -922,10 +922,10 @@ static int discover_arenas(struct btt *btt)
kfree(super);
return ret;
- out:
+out:
kfree(arena);
free_arenas(btt);
- out_super:
+out_super:
kfree(super);
return ret;
}
@@ -1048,7 +1048,7 @@ static int btt_meta_init(struct btt *btt)
btt->init_state = INIT_READY;
- unlock:
+unlock:
mutex_unlock(&btt->init_lock);
return ret;
}
@@ -1066,7 +1066,7 @@ static u32 btt_meta_size(struct btt *btt)
* so that this range search becomes faster.
*/
static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
- struct arena_info **arena)
+ struct arena_info **arena)
{
struct arena_info *arena_list;
__u64 lba = div_u64(sector << SECTOR_SHIFT, btt->sector_size);
@@ -1088,7 +1088,7 @@ static int lba_to_arena(struct btt *btt, sector_t sector, __u32 *premap,
* readability, since they index into an array of locks
*/
static void lock_map(struct arena_info *arena, u32 premap)
- __acquires(&arena->map_locks[idx].lock)
+ __acquires(&arena->map_locks[idx].lock)
{
u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
@@ -1096,7 +1096,7 @@ static void lock_map(struct arena_info *arena, u32 premap)
}
static void unlock_map(struct arena_info *arena, u32 premap)
- __releases(&arena->map_locks[idx].lock)
+ __releases(&arena->map_locks[idx].lock)
{
u32 idx = (premap * MAP_ENT_SIZE / L1_CACHE_BYTES) % arena->nfree;
@@ -1104,7 +1104,7 @@ static void unlock_map(struct arena_info *arena, u32 premap)
}
static int btt_data_read(struct arena_info *arena, struct page *page,
- unsigned int off, u32 lba, u32 len)
+ unsigned int off, u32 lba, u32 len)
{
int ret;
u64 nsoff = to_namespace_offset(arena, lba);
@@ -1117,7 +1117,7 @@ static int btt_data_read(struct arena_info *arena, struct page *page,
}
static int btt_data_write(struct arena_info *arena, u32 lba,
- struct page *page, unsigned int off, u32 len)
+ struct page *page, unsigned int off, u32 len)
{
int ret;
u64 nsoff = to_namespace_offset(arena, lba);
@@ -1139,7 +1139,7 @@ static void zero_fill_data(struct page *page, unsigned int off, u32 len)
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
- struct arena_info *arena, u32 postmap, int rw)
+ struct arena_info *arena, u32 postmap, int rw)
{
unsigned int len = btt_meta_size(btt);
u64 meta_nsoff;
@@ -1166,12 +1166,12 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
mem = kmap_atomic(bv.bv_page);
if (rw)
ret = arena_write_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
- NVDIMM_IO_ATOMIC);
+ mem + bv.bv_offset, cur_len,
+ NVDIMM_IO_ATOMIC);
else
ret = arena_read_bytes(arena, meta_nsoff,
- mem + bv.bv_offset, cur_len,
- NVDIMM_IO_ATOMIC);
+ mem + bv.bv_offset, cur_len,
+ NVDIMM_IO_ATOMIC);
kunmap_atomic(mem);
if (ret)
@@ -1188,15 +1188,15 @@ static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int btt_rw_integrity(struct btt *btt, struct bio_integrity_payload *bip,
- struct arena_info *arena, u32 postmap, int rw)
+ struct arena_info *arena, u32 postmap, int rw)
{
return 0;
}
#endif
static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
- struct page *page, unsigned int off, sector_t sector,
- unsigned int len)
+ struct page *page, unsigned int off, sector_t sector,
+ unsigned int len)
{
int ret = 0;
int t_flag, e_flag;
@@ -1215,7 +1215,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
cur_len = min(btt->sector_size, len);
ret = btt_map_read(arena, premap, &postmap, &t_flag, &e_flag,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC);
if (ret)
goto out_lane;
@@ -1246,12 +1246,12 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
barrier();
ret = btt_map_read(arena, premap, &new_map, &new_t,
- &new_e, NVDIMM_IO_ATOMIC);
+ &new_e, NVDIMM_IO_ATOMIC);
if (ret)
goto out_rtt;
if ((postmap == new_map) && (t_flag == new_t) &&
- (e_flag == new_e))
+ (e_flag == new_e))
break;
postmap = new_map;
@@ -1265,7 +1265,7 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
/* Media error - set the e_flag */
rc = btt_map_write(arena, premap, postmap, 0, 1,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC);
goto out_rtt;
}
@@ -1285,9 +1285,9 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
return 0;
- out_rtt:
+out_rtt:
arena->rtt[lane] = RTT_INVALID;
- out_lane:
+out_lane:
nd_region_release_lane(btt->nd_region, lane);
return ret;
}
@@ -1298,10 +1298,10 @@ static int btt_read_pg(struct btt *btt, struct bio_integrity_payload *bip,
* we need the final, raw namespace offset here
*/
static bool btt_is_badblock(struct btt *btt, struct arena_info *arena,
- u32 postmap)
+ u32 postmap)
{
u64 nsoff = adjust_initial_offset(arena->nd_btt,
- to_namespace_offset(arena, postmap));
+ to_namespace_offset(arena, postmap));
sector_t phys_sector = nsoff >> 9;
return is_bad_pmem(btt->phys_bb, phys_sector, arena->internal_lbasize);
@@ -1321,7 +1321,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
u32 cur_len;
int e_flag;
- retry:
+ retry:
lane = nd_region_acquire_lane(btt->nd_region);
ret = lba_to_arena(btt, sector, &premap, &arena);
@@ -1338,7 +1338,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
arena->freelist[lane].has_err = 1;
if (mutex_is_locked(&arena->err_lock)
- || arena->freelist[lane].has_err) {
+ || arena->freelist[lane].has_err) {
nd_region_release_lane(btt->nd_region, lane);
ret = arena_clear_freelist_error(arena, lane);
@@ -1368,14 +1368,14 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
if (bip) {
ret = btt_rw_integrity(btt, bip, arena, new_postmap,
- WRITE);
+ WRITE);
if (ret)
goto out_lane;
}
lock_map(arena, premap);
ret = btt_map_read(arena, premap, &old_postmap, NULL, &e_flag,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC);
if (ret)
goto out_map;
if (old_postmap >= arena->internal_nlba) {
@@ -1395,7 +1395,7 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
goto out_map;
ret = btt_map_write(arena, premap, new_postmap, 0, 0,
- NVDIMM_IO_ATOMIC);
+ NVDIMM_IO_ATOMIC);
if (ret)
goto out_map;
@@ -1415,16 +1415,16 @@ static int btt_write_pg(struct btt *btt, struct bio_integrity_payload *bip,
return 0;
- out_map:
+out_map:
unlock_map(arena, premap);
- out_lane:
+out_lane:
nd_region_release_lane(btt->nd_region, lane);
return ret;
}
static int btt_do_bvec(struct btt *btt, struct bio_integrity_payload *bip,
- struct page *page, unsigned int len, unsigned int off,
- unsigned int op, sector_t sector)
+ struct page *page, unsigned int len, unsigned int off,
+ unsigned int op, sector_t sector)
{
int ret;
@@ -1457,9 +1457,9 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
unsigned int len = bvec.bv_len;
if (len > PAGE_SIZE || len < btt->sector_size ||
- len % btt->sector_size) {
+ len % btt->sector_size) {
dev_err_ratelimited(&btt->nd_btt->dev,
- "unaligned bio segment (len: %d)\n", len);
+ "unaligned bio segment (len: %d)\n", len);
bio->bi_status = BLK_STS_IOERR;
break;
}
@@ -1468,10 +1468,10 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
bio_op(bio), iter.bi_sector);
if (err) {
dev_err(&btt->nd_btt->dev,
- "io error in %s sector %lld, len %d,\n",
- (op_is_write(bio_op(bio))) ? "WRITE" :
- "READ",
- (unsigned long long) iter.bi_sector, len);
+ "io error in %s sector %lld, len %d,\n",
+ (op_is_write(bio_op(bio))) ? "WRITE" :
+ "READ",
+ (unsigned long long) iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
@@ -1484,7 +1484,7 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
}
static int btt_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, unsigned int op)
{
struct btt *btt = bdev->bd_disk->private_data;
int rc;
@@ -1538,7 +1538,7 @@ static int btt_blk_init(struct btt *btt)
btt->btt_disk->queue = btt->btt_queue;
btt->btt_disk->flags = GENHD_FL_EXT_DEVT;
btt->btt_disk->queue->backing_dev_info->capabilities |=
- BDI_CAP_SYNCHRONOUS_IO;
+ BDI_CAP_SYNCHRONOUS_IO;
blk_queue_make_request(btt->btt_queue, btt_make_request);
blk_queue_logical_block_size(btt->btt_queue, btt->sector_size);
@@ -1589,7 +1589,7 @@ static void btt_blk_cleanup(struct btt *btt)
* Pointer to a new struct btt on success, NULL on failure.
*/
static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
- u32 lbasize, u8 *uuid, struct nd_region *nd_region)
+ u32 lbasize, u8 *uuid, struct nd_region *nd_region)
{
int ret;
struct btt *btt;
@@ -1618,13 +1618,13 @@ static struct btt *btt_init(struct nd_btt *nd_btt, unsigned long long rawsize,
if (btt->init_state != INIT_READY && nd_region->ro) {
dev_warn(dev, "%s is read-only, unable to init btt metadata\n",
- dev_name(&nd_region->dev));
+ dev_name(&nd_region->dev));
return NULL;
} else if (btt->init_state != INIT_READY) {
btt->num_arenas = (rawsize / ARENA_MAX_SIZE) +
((rawsize % ARENA_MAX_SIZE) ? 1 : 0);
dev_dbg(dev, "init: %d arenas for %llu rawsize\n",
- btt->num_arenas, rawsize);
+ btt->num_arenas, rawsize);
ret = create_arenas(btt);
if (ret) {
@@ -1696,13 +1696,13 @@ int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns)
rawsize = nvdimm_namespace_capacity(ndns) - nd_btt->initial_offset;
if (rawsize < ARENA_MIN_SIZE) {
dev_dbg(&nd_btt->dev, "%s must be at least %ld bytes\n",
- dev_name(&ndns->dev),
- ARENA_MIN_SIZE + nd_btt->initial_offset);
+ dev_name(&ndns->dev),
+ ARENA_MIN_SIZE + nd_btt->initial_offset);
return -ENXIO;
}
nd_region = to_nd_region(nd_btt->dev.parent);
btt = btt_init(nd_btt, rawsize, nd_btt->lbasize, nd_btt->uuid,
- nd_region);
+ nd_region);
if (!btt)
return -ENOMEM;
nd_btt->btt = btt;
diff --git a/drivers/nvdimm/btt.h b/drivers/nvdimm/btt.h
index 2e258bee7db2..1da76da3e159 100644
--- a/drivers/nvdimm/btt.h
+++ b/drivers/nvdimm/btt.h
@@ -235,6 +235,6 @@ struct btt {
bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super);
int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
- struct btt_sb *btt_sb);
+ struct btt_sb *btt_sb);
#endif
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 3508a79110c7..9c4cbda834be 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -46,10 +46,10 @@ struct nd_btt *to_nd_btt(struct device *dev)
EXPORT_SYMBOL(to_nd_btt);
static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
- 4096, 4104, 4160, 4224, 0 };
+ 4096, 4104, 4160, 4224, 0 };
static ssize_t sector_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
@@ -57,7 +57,7 @@ static ssize_t sector_size_show(struct device *dev,
}
static ssize_t sector_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -65,9 +65,9 @@ static ssize_t sector_size_store(struct device *dev,
nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
- btt_lbasize_supported);
+ btt_lbasize_supported);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -76,7 +76,7 @@ static ssize_t sector_size_store(struct device *dev,
static DEVICE_ATTR_RW(sector_size);
static ssize_t uuid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
@@ -86,7 +86,7 @@ static ssize_t uuid_show(struct device *dev,
}
static ssize_t uuid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -94,7 +94,7 @@ static ssize_t uuid_store(struct device *dev,
nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nd_device_unlock(dev);
return rc ? rc : len;
@@ -102,20 +102,20 @@ static ssize_t uuid_store(struct device *dev,
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
rc = sprintf(buf, "%s\n", nd_btt->ndns
- ? dev_name(&nd_btt->ndns->dev) : "");
+ ? dev_name(&nd_btt->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -124,7 +124,7 @@ static ssize_t namespace_store(struct device *dev,
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -133,7 +133,7 @@ static ssize_t namespace_store(struct device *dev,
static DEVICE_ATTR_RW(namespace);
static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -152,7 +152,7 @@ static ssize_t size_show(struct device *dev,
static DEVICE_ATTR_RO(size);
static ssize_t log_zero_flags_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "Y\n");
}
@@ -179,8 +179,8 @@ static const struct attribute_group *nd_btt_attribute_groups[] = {
};
static struct device *__nd_btt_create(struct nd_region *nd_region,
- unsigned long lbasize, u8 *uuid,
- struct nd_namespace_common *ndns)
+ unsigned long lbasize, u8 *uuid,
+ struct nd_namespace_common *ndns)
{
struct nd_btt *nd_btt;
struct device *dev;
@@ -208,7 +208,7 @@ static struct device *__nd_btt_create(struct nd_region *nd_region,
device_initialize(&nd_btt->dev);
if (ndns && !__nd_attach_ndns(&nd_btt->dev, ndns, &nd_btt->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
- dev_name(ndns->claim));
+ dev_name(ndns->claim));
put_device(dev);
return NULL;
}
@@ -269,7 +269,7 @@ bool nd_btt_arena_is_valid(struct nd_btt *nd_btt, struct btt_sb *super)
EXPORT_SYMBOL(nd_btt_arena_is_valid);
int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
- struct btt_sb *btt_sb)
+ struct btt_sb *btt_sb)
{
if (ndns->claim_class == NVDIMM_CCLASS_BTT2) {
/* Probe/setup for BTT v2.0 */
@@ -281,7 +281,7 @@ int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 2) ||
- (le16_to_cpu(btt_sb->version_minor) != 0))
+ (le16_to_cpu(btt_sb->version_minor) != 0))
return -ENODEV;
} else {
/*
@@ -296,7 +296,7 @@ int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
if (!nd_btt_arena_is_valid(nd_btt, btt_sb))
return -ENODEV;
if ((le16_to_cpu(btt_sb->version_major) != 1) ||
- (le16_to_cpu(btt_sb->version_minor) != 1))
+ (le16_to_cpu(btt_sb->version_minor) != 1))
return -ENODEV;
}
return 0;
@@ -304,7 +304,7 @@ int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
EXPORT_SYMBOL(nd_btt_version);
static int __nd_btt_probe(struct nd_btt *nd_btt,
- struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
+ struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
{
int rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 29479d3b01b0..83b6fcbb252d 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -48,7 +48,7 @@ static int to_nd_device_type(struct device *dev)
static int nvdimm_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
{
return add_uevent_var(env, "MODALIAS=" ND_DEVICE_MODALIAS_FMT,
- to_nd_device_type(dev));
+ to_nd_device_type(dev));
}
static struct module *to_bus_provider(struct device *dev)
@@ -88,7 +88,7 @@ static int nvdimm_bus_probe(struct device *dev)
return -ENXIO;
dev_dbg(&nvdimm_bus->dev, "START: %s.probe(%s)\n",
- dev->driver->name, dev_name(dev));
+ dev->driver->name, dev_name(dev));
nvdimm_bus_probe_start(nvdimm_bus);
debug_nvdimm_lock(dev);
@@ -102,7 +102,7 @@ static int nvdimm_bus_probe(struct device *dev)
nvdimm_bus_probe_end(nvdimm_bus);
dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
- dev_name(dev), rc);
+ dev_name(dev), rc);
if (rc != 0)
module_put(provider);
@@ -124,7 +124,7 @@ static int nvdimm_bus_remove(struct device *dev)
nd_region_disable(nvdimm_bus, dev);
dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
- dev_name(dev), rc);
+ dev_name(dev), rc);
module_put(provider);
return rc;
}
@@ -140,7 +140,7 @@ static void nvdimm_bus_shutdown(struct device *dev)
if (nd_drv && nd_drv->shutdown) {
nd_drv->shutdown(dev);
dev_dbg(&nvdimm_bus->dev, "%s.shutdown(%s)\n",
- dev->driver->name, dev_name(dev));
+ dev->driver->name, dev_name(dev));
}
}
@@ -190,7 +190,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
/* make sure we are in the region */
if (ctx->phys < nd_region->ndr_start
- || (ctx->phys + ctx->cleared) > ndr_end)
+ || (ctx->phys + ctx->cleared) > ndr_end)
return 0;
sector = (ctx->phys - nd_region->ndr_start) / 512;
@@ -203,7 +203,7 @@ static int nvdimm_clear_badblocks_region(struct device *dev, void *data)
}
static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t phys, u64 cleared)
+ phys_addr_t phys, u64 cleared)
{
struct clear_badblocks_context ctx = {
.phys = phys,
@@ -211,11 +211,11 @@ static void nvdimm_clear_badblocks_regions(struct nvdimm_bus *nvdimm_bus,
};
device_for_each_child(&nvdimm_bus->dev, &ctx,
- nvdimm_clear_badblocks_region);
+ nvdimm_clear_badblocks_region);
}
static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
- phys_addr_t phys, u64 cleared)
+ phys_addr_t phys, u64 cleared)
{
if (cleared > 0)
badrange_forget(&nvdimm_bus->badrange, phys, cleared);
@@ -225,7 +225,7 @@ static void nvdimm_account_cleared_poison(struct nvdimm_bus *nvdimm_bus,
}
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
- unsigned int len)
+ unsigned int len)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc;
@@ -251,7 +251,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
ars_cap.length = len;
noio_flag = memalloc_noio_save();
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_CAP, &ars_cap,
- sizeof(ars_cap), &cmd_rc);
+ sizeof(ars_cap), &cmd_rc);
memalloc_noio_restore(noio_flag);
if (rc < 0)
return rc;
@@ -269,7 +269,7 @@ long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
clear_err.length = len;
noio_flag = memalloc_noio_save();
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_CLEAR_ERROR, &clear_err,
- sizeof(clear_err), &cmd_rc);
+ sizeof(clear_err), &cmd_rc);
memalloc_noio_restore(noio_flag);
if (rc < 0)
return rc;
@@ -337,7 +337,7 @@ struct nvdimm_bus *nvdimm_to_bus(struct nvdimm *nvdimm)
EXPORT_SYMBOL_GPL(nvdimm_to_bus);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
- struct nvdimm_bus_descriptor *nd_desc)
+ struct nvdimm_bus_descriptor *nd_desc)
{
struct nvdimm_bus *nvdimm_bus;
int rc;
@@ -369,7 +369,7 @@ struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
}
return nvdimm_bus;
- err:
+err:
put_device(&nvdimm_bus->dev);
return NULL;
}
@@ -433,7 +433,7 @@ static int nd_bus_remove(struct device *dev)
mutex_unlock(&nvdimm_bus_list_mutex);
wait_event(nvdimm_bus->wait,
- atomic_read(&nvdimm_bus->ioctl_active) == 0);
+ atomic_read(&nvdimm_bus->ioctl_active) == 0);
nd_synchronize();
device_for_each_child(&nvdimm_bus->dev, NULL, child_unregister);
@@ -571,7 +571,7 @@ void nd_device_unregister(struct device *dev, enum nd_async_mode mode)
get_device(dev);
async_schedule_domain(nd_async_device_unregister, dev,
- &nd_async_domain);
+ &nd_async_domain);
break;
case ND_SYNC:
/*
@@ -602,13 +602,13 @@ EXPORT_SYMBOL(nd_device_unregister);
* @mod_name: automatically set by nd_driver_register() macro
*/
int __nd_driver_register(struct nd_device_driver *nd_drv, struct module *owner,
- const char *mod_name)
+ const char *mod_name)
{
struct device_driver *drv = &nd_drv->drv;
if (!nd_drv->type) {
pr_debug("driver type bitmask not set (%ps)\n",
- __builtin_return_address(0));
+ __builtin_return_address(0));
return -EINVAL;
}
@@ -639,7 +639,7 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
return 0;
dev_info(dev, "%s read-only, marking %s read-only\n",
- dev_name(&nd_region->dev), disk->disk_name);
+ dev_name(&nd_region->dev), disk->disk_name);
set_disk_ro(disk, 1);
return 0;
@@ -648,15 +648,15 @@ int nvdimm_revalidate_disk(struct gendisk *disk)
EXPORT_SYMBOL(nvdimm_revalidate_disk);
static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
return sprintf(buf, ND_DEVICE_MODALIAS_FMT "\n",
- to_nd_device_type(dev));
+ to_nd_device_type(dev));
}
static DEVICE_ATTR_RO(modalias);
static ssize_t devtype_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
return sprintf(buf, "%s\n", dev->type->name);
}
@@ -677,7 +677,7 @@ struct attribute_group nd_device_attribute_group = {
EXPORT_SYMBOL_GPL(nd_device_attribute_group);
static ssize_t numa_node_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", dev_to_node(dev));
}
@@ -689,7 +689,7 @@ static struct attribute *nd_numa_attributes[] = {
};
static umode_t nd_numa_attr_visible(struct kobject *kobj, struct attribute *a,
- int n)
+ int n)
{
if (!IS_ENABLED(CONFIG_NUMA))
return 0;
@@ -712,11 +712,11 @@ int nvdimm_bus_create_ndctl(struct nvdimm_bus *nvdimm_bus)
struct device *dev;
dev = device_create(nd_class, &nvdimm_bus->dev, devt, nvdimm_bus,
- "ndctl%d", nvdimm_bus->id);
+ "ndctl%d", nvdimm_bus->id);
if (IS_ERR(dev))
dev_dbg(&nvdimm_bus->dev, "failed to register ndctl%d: %ld\n",
- nvdimm_bus->id, PTR_ERR(dev));
+ nvdimm_bus->id, PTR_ERR(dev));
return PTR_ERR_OR_ZERO(dev);
}
@@ -818,7 +818,7 @@ const struct nd_cmd_desc *nd_cmd_bus_desc(int cmd)
EXPORT_SYMBOL_GPL(nd_cmd_bus_desc);
u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
- const struct nd_cmd_desc *desc, int idx, void *buf)
+ const struct nd_cmd_desc *desc, int idx, void *buf)
{
if (idx >= desc->in_num)
return UINT_MAX;
@@ -845,8 +845,8 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
EXPORT_SYMBOL_GPL(nd_cmd_in_size);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
- const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
- const u32 *out_field, unsigned long remainder)
+ const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
+ const u32 *out_field, unsigned long remainder)
{
if (idx >= desc->out_num)
return UINT_MAX;
@@ -896,7 +896,7 @@ void wait_nvdimm_bus_probe_idle(struct device *dev)
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
wait_event(nvdimm_bus->wait,
- nvdimm_bus->probe_active == 0);
+ nvdimm_bus->probe_active == 0);
nd_device_lock(dev);
nvdimm_bus_lock(dev);
} while (true);
@@ -950,7 +950,7 @@ static int nd_ns_forget_poison_check(struct device *dev, void *data)
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
- struct nvdimm *nvdimm, unsigned int cmd, void *data)
+ struct nvdimm *nvdimm, unsigned int cmd, void *data)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -965,7 +965,7 @@ static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
/* require clear error to go through the pmem driver */
if (!nvdimm && cmd == ND_CMD_CLEAR_ERROR)
return device_for_each_child(&nvdimm_bus->dev, data,
- nd_ns_forget_poison_check);
+ nd_ns_forget_poison_check);
if (!nvdimm || cmd != ND_CMD_SET_CONFIG_DATA)
return 0;
@@ -978,7 +978,7 @@ static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
}
static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
- int read_only, unsigned int ioctl_cmd, unsigned long arg)
+ int read_only, unsigned int ioctl_cmd, unsigned long arg)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
const struct nd_cmd_desc *desc = NULL;
@@ -1013,7 +1013,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
}
if (!desc || (desc->out_num + desc->in_num == 0) ||
- !test_bit(cmd, &cmd_mask))
+ !test_bit(cmd, &cmd_mask))
return -ENOTTY;
/* fail write commands (when read-only) */
@@ -1025,8 +1025,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
dev_dbg(dev, "'%s' command while read-only.\n",
- nvdimm ? nvdimm_cmd_name(cmd)
- : nvdimm_bus_cmd_name(cmd));
+ nvdimm ? nvdimm_cmd_name(cmd)
+ : nvdimm_bus_cmd_name(cmd));
return -EPERM;
default:
break;
@@ -1042,7 +1042,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
in_size = nd_cmd_in_size(nvdimm, cmd, desc, i, in_env);
if (in_size == UINT_MAX) {
dev_err(dev, "%s:%s unknown input size cmd: %s field: %d\n",
- __func__, dimm_name, cmd_name, i);
+ __func__, dimm_name, cmd_name, i);
rc = -ENXIO;
goto out;
}
@@ -1060,8 +1060,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (cmd == ND_CMD_CALL) {
func = pkg.nd_command;
dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
- dimm_name, pkg.nd_command,
- in_len, out_len, buf_len);
+ dimm_name, pkg.nd_command,
+ in_len, out_len, buf_len);
}
/* process an output envelope */
@@ -1073,12 +1073,12 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
for (i = 0; i < desc->out_num; i++) {
u32 out_size = nd_cmd_out_size(nvdimm, cmd, desc, i,
- (u32 *) in_env, (u32 *) out_env, 0);
+ (u32 *) in_env, (u32 *) out_env, 0);
u32 copy;
if (out_size == UINT_MAX) {
dev_dbg(dev, "%s unknown output size cmd: %s field: %d\n",
- dimm_name, cmd_name, i);
+ dimm_name, cmd_name, i);
rc = -EFAULT;
goto out;
}
@@ -1087,7 +1087,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
else
copy = 0;
if (copy && copy_from_user(&out_env[out_len],
- p + in_len + out_len, copy)) {
+ p + in_len + out_len, copy)) {
rc = -EFAULT;
goto out;
}
@@ -1097,7 +1097,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
buf_len = (u64) out_len + (u64) in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
- cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
+ cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
rc = -EINVAL;
goto out;
}
@@ -1127,7 +1127,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
struct nd_cmd_clear_error *clear_err = buf;
nvdimm_account_cleared_poison(nvdimm_bus, clear_err->address,
- clear_err->cleared);
+ clear_err->cleared);
}
if (copy_to_user(p, buf, buf_len))
@@ -1162,7 +1162,7 @@ static int match_dimm(struct device *dev, void *data)
}
static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
- enum nd_ioctl_mode mode)
+ enum nd_ioctl_mode mode)
{
struct nvdimm_bus *nvdimm_bus, *found = NULL;
@@ -1177,7 +1177,7 @@ static long nd_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
struct device *dev;
dev = device_find_child(&nvdimm_bus->dev,
- file->private_data, match_dimm);
+ file->private_data, match_dimm);
if (!dev)
continue;
nvdimm = to_nvdimm(dev);
@@ -1271,13 +1271,13 @@ int __init nvdimm_bus_init(void)
return 0;
- err_nd_bus:
+err_nd_bus:
class_destroy(nd_class);
- err_class:
+err_class:
unregister_chrdev(nvdimm_major, "dimmctl");
- err_dimm_chrdev:
+err_dimm_chrdev:
unregister_chrdev(nvdimm_bus_major, "ndctl");
- err_bus_chrdev:
+err_bus_chrdev:
bus_unregister(&nvdimm_bus_type);
return rc;
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 2985ca949912..62f3afaa7d27 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -27,7 +27,7 @@ void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns)
}
void nd_detach_ndns(struct device *dev,
- struct nd_namespace_common **_ndns)
+ struct nd_namespace_common **_ndns)
{
struct nd_namespace_common *ndns = *_ndns;
@@ -41,7 +41,7 @@ void nd_detach_ndns(struct device *dev,
}
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns)
+ struct nd_namespace_common **_ndns)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&attach->dev);
@@ -56,7 +56,7 @@ bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
}
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns)
+ struct nd_namespace_common **_ndns)
{
bool claimed;
@@ -110,7 +110,7 @@ struct nd_pfn *to_nd_pfn_safe(struct device *dev)
}
static void nd_detach_and_reset(struct device *dev,
- struct nd_namespace_common **_ndns)
+ struct nd_namespace_common **_ndns)
{
/* detach the namespace and destroy / reset the device */
__nd_detach_ndns(dev, _ndns);
@@ -132,8 +132,8 @@ static void nd_detach_and_reset(struct device *dev,
}
ssize_t nd_namespace_store(struct device *dev,
- struct nd_namespace_common **_ndns, const char *buf,
- size_t len)
+ struct nd_namespace_common **_ndns, const char *buf,
+ size_t len)
{
struct nd_namespace_common *ndns;
struct device *found;
@@ -162,7 +162,7 @@ ssize_t nd_namespace_store(struct device *dev,
goto out;
} else if (ndns) {
dev_dbg(dev, "namespace already set to: %s\n",
- dev_name(&ndns->dev));
+ dev_name(&ndns->dev));
len = -EBUSY;
goto out;
}
@@ -170,7 +170,7 @@ ssize_t nd_namespace_store(struct device *dev,
found = device_find_child(dev->parent, name, namespace_match);
if (!found) {
dev_dbg(dev, "'%s' not found under %s\n", name,
- dev_name(dev->parent));
+ dev_name(dev->parent));
len = -ENODEV;
goto out;
}
@@ -214,13 +214,13 @@ ssize_t nd_namespace_store(struct device *dev,
WARN_ON_ONCE(!is_nvdimm_bus_locked(dev));
if (!__nd_attach_ndns(dev, ndns, _ndns)) {
dev_dbg(dev, "%s already claimed\n",
- dev_name(&ndns->dev));
+ dev_name(&ndns->dev));
len = -EBUSY;
}
- out_attach:
+out_attach:
put_device(&ndns->dev); /* from device_find_child */
- out:
+out:
kfree(name);
return len;
}
@@ -249,8 +249,8 @@ u64 nd_sb_checksum(struct nd_gen_sb *nd_gen_sb)
EXPORT_SYMBOL(nd_sb_checksum);
static int nsio_rw_bytes(struct nd_namespace_common *ndns,
- resource_size_t offset, void *buf, size_t size, int rw,
- unsigned long flags)
+ resource_size_t offset, void *buf, size_t size, int rw,
+ unsigned long flags)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
unsigned int sz_align = ALIGN(size + (offset & (512 - 1)), 512);
@@ -275,12 +275,12 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
- && !(flags & NVDIMM_IO_ATOMIC)) {
+ && !(flags & NVDIMM_IO_ATOMIC)) {
long cleared;
might_sleep();
cleared = nvdimm_clear_poison(&ndns->dev,
- nsio->res.start + offset, size);
+ nsio->res.start + offset, size);
if (cleared < size)
rc = -EIO;
if (cleared > 0 && cleared / 512) {
@@ -307,7 +307,7 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
nsio->size = resource_size(res);
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(&ndns->dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
@@ -316,10 +316,10 @@ int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio)
if (devm_init_badblocks(dev, &nsio->bb))
return -ENOMEM;
nvdimm_badblocks_populate(to_nd_region(ndns->dev.parent), &nsio->bb,
- &nsio->res);
+ &nsio->res);
nsio->addr = devm_memremap(dev, res->start, resource_size(res),
- ARCH_MEMREMAP_PMEM);
+ ARCH_MEMREMAP_PMEM);
return PTR_ERR_OR_ZERO(nsio->addr);
}
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index 9204f1e9fd14..b3ff3e62d847 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -63,7 +63,7 @@ struct nvdimm_map {
};
static struct nvdimm_map *find_nvdimm_map(struct device *dev,
- resource_size_t offset)
+ resource_size_t offset)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_map *nvdimm_map;
@@ -75,7 +75,7 @@ static struct nvdimm_map *find_nvdimm_map(struct device *dev,
}
static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
- resource_size_t offset, size_t size, unsigned long flags)
+ resource_size_t offset, size_t size, unsigned long flags)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
struct nvdimm_map *nvdimm_map;
@@ -93,7 +93,7 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev))) {
dev_err(&nvdimm_bus->dev, "failed to request %pa + %zd for %s\n",
- &offset, size, dev_name(dev));
+ &offset, size, dev_name(dev));
goto err_request_region;
}
@@ -106,14 +106,14 @@ static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
goto err_map;
dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
- __func__);
+ __func__);
list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);
return nvdimm_map;
- err_map:
+err_map:
release_mem_region(offset, size);
- err_request_region:
+err_request_region:
kfree(nvdimm_map);
return NULL;
}
@@ -154,7 +154,7 @@ static void nvdimm_map_put(void *data)
* @flags: memremap flags, or, if zero, perform an ioremap instead
*/
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
- size_t size, unsigned long flags)
+ size_t size, unsigned long flags)
{
struct nvdimm_map *nvdimm_map;
@@ -214,7 +214,7 @@ static bool is_uuid_sep(char sep)
}
static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
- size_t len)
+ size_t len)
{
const char *str = buf;
u8 uuid[16];
@@ -223,8 +223,8 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
for (i = 0; i < 16; i++) {
if (!isxdigit(str[0]) || !isxdigit(str[1])) {
dev_dbg(dev, "pos: %d buf[%zd]: %c buf[%zd]: %c\n",
- i, str - buf, str[0],
- str + 1 - buf, str[1]);
+ i, str - buf, str[0],
+ str + 1 - buf, str[1]);
return -EINVAL;
}
@@ -249,7 +249,7 @@ static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
* LOCKING: expects nd_device_lock() is held on entry
*/
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
- size_t len)
+ size_t len)
{
u8 uuid[16];
int rc;
@@ -270,7 +270,7 @@ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
}
ssize_t nd_size_select_show(unsigned long current_size,
- const unsigned long *supported, char *buf)
+ const unsigned long *supported, char *buf)
{
ssize_t len = 0;
int i;
@@ -285,7 +285,7 @@ ssize_t nd_size_select_show(unsigned long current_size,
}
ssize_t nd_size_select_store(struct device *dev, const char *buf,
- unsigned long *current_size, const unsigned long *supported)
+ unsigned long *current_size, const unsigned long *supported)
{
unsigned long lbasize;
int rc, i;
@@ -310,7 +310,7 @@ ssize_t nd_size_select_store(struct device *dev, const char *buf,
}
static ssize_t commands_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
int cmd, len = 0;
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
@@ -337,7 +337,7 @@ static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
}
static ssize_t provider_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
@@ -361,7 +361,7 @@ static int flush_regions_dimms(struct device *dev, void *data)
}
static ssize_t wait_probe_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -442,9 +442,9 @@ static __init int libnvdimm_init(void)
nd_label_init();
return 0;
- err_region:
+err_region:
nvdimm_exit();
- err_dimm:
+err_dimm:
nvdimm_bus_exit();
return rc;
}
diff --git a/drivers/nvdimm/dimm.c b/drivers/nvdimm/dimm.c
index 64776ed15bb3..916710ae647f 100644
--- a/drivers/nvdimm/dimm.c
+++ b/drivers/nvdimm/dimm.c
@@ -108,7 +108,7 @@ static int nvdimm_probe(struct device *dev)
return 0;
- err:
+err:
put_ndd(ndd);
return rc;
}
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index 196aa44c4936..52b00078939b 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -51,7 +51,7 @@ static int validate_dimm(struct nvdimm_drvdata *ndd)
rc = nvdimm_check_config_data(ndd->dev);
if (rc)
dev_dbg(ndd->dev, "%ps: %s error: %d\n",
- __builtin_return_address(0), __func__, rc);
+ __builtin_return_address(0), __func__, rc);
return rc;
}
@@ -76,7 +76,7 @@ int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
memset(cmd, 0, sizeof(*cmd));
nd_desc = nvdimm_bus->nd_desc;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
+ ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
if (rc < 0)
return rc;
return cmd_rc;
@@ -112,7 +112,7 @@ int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
cmd_size = sizeof(*cmd) + cmd->in_length;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
@@ -129,7 +129,7 @@ int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
}
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
- void *buf, size_t len)
+ void *buf, size_t len)
{
size_t max_cmd_size, buf_offset;
struct nd_cmd_set_config_hdr *cmd;
@@ -149,7 +149,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
return -ENOMEM;
for (buf_offset = 0; len; len -= cmd->in_length,
- buf_offset += cmd->in_length) {
+ buf_offset += cmd->in_length) {
size_t cmd_size;
cmd->in_offset = offset + buf_offset;
@@ -160,7 +160,7 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
@@ -302,7 +302,7 @@ void *nvdimm_provider_data(struct nvdimm *nvdimm)
EXPORT_SYMBOL_GPL(nvdimm_provider_data);
static ssize_t commands_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
int cmd, len = 0;
@@ -318,18 +318,18 @@ static ssize_t commands_show(struct device *dev,
static DEVICE_ATTR_RO(commands);
static ssize_t flags_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
return sprintf(buf, "%s%s\n",
- test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
- test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
+ test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
+ test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);
static ssize_t state_show(struct device *dev, struct device_attribute *attr,
- char *buf)
+ char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
@@ -340,12 +340,12 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
nvdimm_bus_lock(dev);
nvdimm_bus_unlock(dev);
return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
- ? "active" : "idle");
+ ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
static ssize_t available_slots_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
ssize_t rc;
@@ -368,7 +368,7 @@ static ssize_t available_slots_show(struct device *dev,
static DEVICE_ATTR_RO(available_slots);
__weak ssize_t security_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
@@ -384,17 +384,17 @@ __weak ssize_t security_show(struct device *dev,
}
static ssize_t frozen_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nvdimm *nvdimm = to_nvdimm(dev);
return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
- &nvdimm->sec.flags));
+ &nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);
static ssize_t security_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
ssize_t rc;
@@ -438,9 +438,9 @@ static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_security.attr) {
/* Are there any state mutation ops (make writable)? */
if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
- || nvdimm->sec.ops->change_key
- || nvdimm->sec.ops->erase
- || nvdimm->sec.ops->overwrite)
+ || nvdimm->sec.ops->change_key
+ || nvdimm->sec.ops->erase
+ || nvdimm->sec.ops->overwrite)
return a->mode;
return 0444;
}
@@ -457,10 +457,10 @@ struct attribute_group nvdimm_attribute_group = {
EXPORT_SYMBOL_GPL(nvdimm_attribute_group);
struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
- void *provider_data, const struct attribute_group **groups,
- unsigned long flags, unsigned long cmd_mask, int num_flush,
- struct resource *flush_wpq, const char *dimm_id,
- const struct nvdimm_security_ops *sec_ops)
+ void *provider_data, const struct attribute_group **groups,
+ unsigned long flags, unsigned long cmd_mask, int num_flush,
+ struct resource *flush_wpq, const char *dimm_id,
+ const struct nvdimm_security_ops *sec_ops)
{
struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
struct device *dev;
@@ -517,7 +517,7 @@ int nvdimm_security_setup_events(struct device *dev)
struct nvdimm *nvdimm = to_nvdimm(dev);
if (!nvdimm->sec.flags || !nvdimm->sec.ops
- || !nvdimm->sec.ops->overwrite)
+ || !nvdimm->sec.ops->overwrite)
return 0;
nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
if (!nvdimm->sec.overwrite_state)
@@ -590,13 +590,13 @@ int alias_dpa_busy(struct device *dev, void *data)
*/
if (info->res) {
if (info->res->start >= nd_mapping->start
- && info->res->start < map_end)
+ && info->res->start < map_end)
/* pass */;
else
return 0;
}
- retry:
+retry:
/*
* Find the free dpa from the end of the last pmem allocation to
* the end of the interleave-set mapping.
@@ -605,8 +605,8 @@ int alias_dpa_busy(struct device *dev, void *data)
if (strncmp(res->name, "pmem", 4) != 0)
continue;
if ((res->start >= blk_start && res->start < map_end)
- || (res->end >= blk_start
- && res->end <= map_end)) {
+ || (res->end >= blk_start
+ && res->end <= map_end)) {
new = max(blk_start, min(map_end + 1, res->end + 1));
if (new != blk_start) {
blk_start = new;
@@ -710,7 +710,7 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
* the set can be established.
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, resource_size_t *overlap)
+ struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
resource_size_t map_start, map_end, busy = 0, available, blk_start;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -756,7 +756,7 @@ resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
return available - busy;
return 0;
- err:
+err:
nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
return 0;
}
@@ -769,8 +769,8 @@ void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
}
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id, resource_size_t start,
- resource_size_t n)
+ struct nd_label_id *label_id, resource_size_t start,
+ resource_size_t n)
{
char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
struct resource *res;
@@ -791,7 +791,7 @@ struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
* @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
*/
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id)
+ struct nd_label_id *label_id)
{
resource_size_t allocated = 0;
struct resource *res;
diff --git a/drivers/nvdimm/e820.c b/drivers/nvdimm/e820.c
index 87f72f725e4f..adfeaf5c3c23 100644
--- a/drivers/nvdimm/e820.c
+++ b/drivers/nvdimm/e820.c
@@ -71,7 +71,7 @@ static int e820_pmem_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, nvdimm_bus);
rc = walk_iomem_res_desc(IORES_DESC_PERSISTENT_MEMORY_LEGACY,
- IORESOURCE_MEM, 0, -1, nvdimm_bus, e820_register_one);
+ IORESOURCE_MEM, 0, -1, nvdimm_bus, e820_register_one);
if (rc)
goto err;
return 0;
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 73e197babc2f..ebfad5183b23 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -42,14 +42,14 @@ unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
static size_t __sizeof_namespace_index(u32 nslot)
{
return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
- NSINDEX_ALIGN);
+ NSINDEX_ALIGN);
}
static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
- size_t index_size)
+ size_t index_size)
{
return (ndd->nsarea.config_size - index_size * 2) /
- sizeof_namespace_label(ndd);
+ sizeof_namespace_label(ndd);
}
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
@@ -79,7 +79,7 @@ size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
return size / 2;
dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
- ndd->nsarea.config_size, sizeof_namespace_label(ndd));
+ ndd->nsarea.config_size, sizeof_namespace_label(ndd));
return 0;
}
@@ -144,7 +144,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
if (labelsize != sizeof_namespace_label(ndd)) {
dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
- i, nsindex[i]->labelsize);
+ i, nsindex[i]->labelsize);
continue;
}
@@ -165,40 +165,40 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
/* sanity check the index against expected values */
if (__le64_to_cpu(nsindex[i]->myoff)
- != i * sizeof_namespace_index(ndd)) {
+ != i * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->myoff));
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->myoff));
continue;
}
if (__le64_to_cpu(nsindex[i]->otheroff)
- != (!i) * sizeof_namespace_index(ndd)) {
+ != (!i) * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->otheroff));
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->otheroff));
continue;
}
if (__le64_to_cpu(nsindex[i]->labeloff)
- != 2 * sizeof_namespace_index(ndd)) {
+ != 2 * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->labeloff));
+ i, (unsigned long long)
+ __le64_to_cpu(nsindex[i]->labeloff));
continue;
}
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd)
- || size < sizeof(struct nd_namespace_index)) {
+ || size < sizeof(struct nd_namespace_index)) {
dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
continue;
}
nslot = __le32_to_cpu(nsindex[i]->nslot);
if (nslot * sizeof_namespace_label(ndd)
- + 2 * sizeof_namespace_index(ndd)
- > ndd->nsarea.config_size) {
+ + 2 * sizeof_namespace_index(ndd)
+ > ndd->nsarea.config_size) {
dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
- i, nslot, ndd->nsarea.config_size);
+ i, nslot, ndd->nsarea.config_size);
continue;
}
valid[i] = true;
@@ -218,7 +218,7 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
default:
/* pick the best index... */
seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
- __le32_to_cpu(nsindex[1]->seq));
+ __le32_to_cpu(nsindex[1]->seq));
if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
return 1;
else
@@ -271,7 +271,7 @@ static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
}
static int to_slot(struct nvdimm_drvdata *ndd,
- struct nd_namespace_label *nd_label)
+ struct nd_namespace_label *nd_label)
{
unsigned long label, base;
@@ -291,9 +291,9 @@ static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
return (struct nd_namespace_label *) label;
}
-#define for_each_clear_bit_le(bit, addr, size) \
- for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
- (bit) < (size); \
+#define for_each_clear_bit_le(bit, addr, size) \
+ for ((bit) = find_next_zero_bit_le((addr), (size), 0); \
+ (bit) < (size); \
(bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))
/**
@@ -305,8 +305,8 @@ static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
* @nslot: on return set to the number of slots in the label space
*/
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
- struct nd_namespace_index **nsindex_out,
- unsigned long **free, u32 *nslot)
+ struct nd_namespace_index **nsindex_out,
+ unsigned long **free, u32 *nslot)
{
struct nd_namespace_index *nsindex;
@@ -326,28 +326,28 @@ char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
if (!label_id || !uuid)
return NULL;
snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
- flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
+ flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
return label_id->id;
}
static bool preamble_current(struct nvdimm_drvdata *ndd,
- struct nd_namespace_index **nsindex,
- unsigned long **free, u32 *nslot)
+ struct nd_namespace_index **nsindex,
+ unsigned long **free, u32 *nslot)
{
return preamble_index(ndd, ndd->ns_current, nsindex,
- free, nslot);
+ free, nslot);
}
static bool preamble_next(struct nvdimm_drvdata *ndd,
- struct nd_namespace_index **nsindex,
- unsigned long **free, u32 *nslot)
+ struct nd_namespace_index **nsindex,
+ unsigned long **free, u32 *nslot)
{
return preamble_index(ndd, ndd->ns_next, nsindex,
- free, nslot);
+ free, nslot);
}
static bool slot_valid(struct nvdimm_drvdata *ndd,
- struct nd_namespace_label *nd_label, u32 slot)
+ struct nd_namespace_label *nd_label, u32 slot)
{
/* check that we are written where we expect to be written */
if (slot != __le32_to_cpu(nd_label->slot))
@@ -355,7 +355,7 @@ static bool slot_valid(struct nvdimm_drvdata *ndd,
/* check that DPA allocations are page aligned */
if ((__le64_to_cpu(nd_label->dpa)
- | __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
+ | __le64_to_cpu(nd_label->rawsize)) % SZ_4K)
return false;
/* check checksum */
@@ -405,8 +405,8 @@ int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
flags &= ~NSLABEL_FLAG_LOCAL;
nd_label_gen_id(&label_id, label_uuid, flags);
res = nvdimm_allocate_dpa(ndd, &label_id,
- __le64_to_cpu(nd_label->dpa),
- __le64_to_cpu(nd_label->rawsize));
+ __le64_to_cpu(nd_label->dpa),
+ __le64_to_cpu(nd_label->rawsize));
nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
if (!res)
return -EBUSY;
@@ -464,7 +464,7 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
if (read_size < max_xfer) {
/* trim waste */
max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
- DIV_ROUND_UP(config_size, max_xfer);
+ DIV_ROUND_UP(config_size, max_xfer);
/* make certain we read indexes in exactly 1 read */
if (max_xfer < read_size)
max_xfer = read_size;
@@ -516,7 +516,7 @@ int nd_label_data_init(struct nvdimm_drvdata *ndd)
/* determine how much more will be read after this next call. */
label_read_size = offset + ndd->nslabel_size - read_size;
label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
- max_xfer;
+ max_xfer;
/* truncate last read if needed */
if (read_size + label_read_size > config_size)
@@ -559,7 +559,7 @@ int nd_label_active_count(struct nvdimm_drvdata *ndd)
dev_dbg(ndd->dev,
"slot%d invalid slot: %d dpa: %llx size: %llx\n",
- slot, label_slot, dpa, size);
+ slot, label_slot, dpa, size);
continue;
}
count++;
@@ -641,7 +641,7 @@ u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
}
static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
- unsigned long flags)
+ unsigned long flags)
{
struct nd_namespace_index *nsindex;
unsigned long offset;
@@ -664,7 +664,7 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
nsindex->myoff = __cpu_to_le64(offset);
nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
offset = (unsigned long) to_namespace_index(ndd,
- nd_label_next_nsindex(index))
+ nd_label_next_nsindex(index))
- (unsigned long) to_namespace_index(ndd, 0);
nsindex->otheroff = __cpu_to_le64(offset);
offset = (unsigned long) nd_label_base(ndd)
@@ -689,7 +689,7 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
nsindex->checksum = __cpu_to_le64(checksum);
rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
- nsindex, sizeof_namespace_index(ndd));
+ nsindex, sizeof_namespace_index(ndd));
if (rc < 0)
return rc;
@@ -707,7 +707,7 @@ static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
}
static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
- struct nd_namespace_label *nd_label)
+ struct nd_namespace_label *nd_label)
{
return (unsigned long) nd_label
- (unsigned long) to_namespace_index(ndd, 0);
@@ -730,7 +730,7 @@ enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
}
static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
- guid_t *target)
+ guid_t *target)
{
if (claim_class == NVDIMM_CCLASS_BTT)
return &nvdimm_btt_guid;
@@ -751,7 +751,7 @@ static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
}
static void reap_victim(struct nd_mapping *nd_mapping,
- struct nd_label_ent *victim)
+ struct nd_label_ent *victim)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
u32 slot = to_slot(ndd, victim->label);
@@ -762,8 +762,8 @@ static void reap_victim(struct nd_mapping *nd_mapping,
}
static int __pmem_label_update(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
- int pos, unsigned long flags)
+ struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
+ int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -816,8 +816,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
guid_copy(&nd_label->type_guid, &nd_set->type_guid);
if (namespace_label_has(ndd, abstraction_guid))
guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
+ to_abstraction_guid(ndns->claim_class,
+ &nd_label->abstraction_guid));
if (namespace_label_has(ndd, checksum)) {
u64 sum;
@@ -830,7 +830,7 @@ static int __pmem_label_update(struct nd_region *nd_region,
/* update label */
offset = nd_label_offset(ndd, nd_label);
rc = nvdimm_set_config_data(ndd, offset, nd_label,
- sizeof_namespace_label(ndd));
+ sizeof_namespace_label(ndd));
if (rc < 0)
return rc;
@@ -840,14 +840,14 @@ static int __pmem_label_update(struct nd_region *nd_region,
if (!label_ent->label)
continue;
if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
- || memcmp(nspm->uuid, label_ent->label->uuid,
- NSLABEL_UUID_LEN) == 0)
+ || memcmp(nspm->uuid, label_ent->label->uuid,
+ NSLABEL_UUID_LEN) == 0)
reap_victim(nd_mapping, label_ent);
}
/* update index */
rc = nd_label_write_index(ndd, ndd->ns_next,
- nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
+ nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
if (rc == 0) {
list_for_each_entry(label_ent, &nd_mapping->labels, list)
if (!label_ent->label) {
@@ -856,8 +856,8 @@ static int __pmem_label_update(struct nd_region *nd_region,
break;
}
dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
- "failed to track label: %d\n",
- to_slot(ndd, nd_label));
+ "failed to track label: %d\n",
+ to_slot(ndd, nd_label));
if (nd_label)
rc = -ENXIO;
}
@@ -879,7 +879,7 @@ static bool is_old_resource(struct resource *res, struct resource **list, int n)
}
static struct resource *to_resource(struct nvdimm_drvdata *ndd,
- struct nd_namespace_label *nd_label)
+ struct nd_namespace_label *nd_label)
{
struct resource *res;
@@ -900,8 +900,8 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
* 3/ Record the resources in the namespace device
*/
static int __blk_label_update(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
- int num_labels)
+ struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
+ int num_labels)
{
int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -956,7 +956,7 @@ static int __blk_label_update(struct nd_region *nd_region,
continue;
res = to_resource(ndd, nd_label);
if (res && is_old_resource(res, old_res_list,
- old_num_resources))
+ old_num_resources))
continue;
slot = to_slot(ndd, nd_label);
set_bit(slot, victim_map);
@@ -1013,7 +1013,7 @@ static int __blk_label_update(struct nd_region *nd_region,
memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
if (nsblk->alt_name)
memcpy(nd_label->name, nsblk->alt_name,
- NSLABEL_NAME_LEN);
+ NSLABEL_NAME_LEN);
nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);
/*
@@ -1044,22 +1044,22 @@ static int __blk_label_update(struct nd_region *nd_region,
guid_copy(&nd_label->type_guid, &nd_set->type_guid);
if (namespace_label_has(ndd, abstraction_guid))
guid_copy(&nd_label->abstraction_guid,
- to_abstraction_guid(ndns->claim_class,
- &nd_label->abstraction_guid));
+ to_abstraction_guid(ndns->claim_class,
+ &nd_label->abstraction_guid));
if (namespace_label_has(ndd, checksum)) {
u64 sum;
nd_label->checksum = __cpu_to_le64(0);
sum = nd_fletcher64(nd_label,
- sizeof_namespace_label(ndd), 1);
+ sizeof_namespace_label(ndd), 1);
nd_label->checksum = __cpu_to_le64(sum);
}
/* update label */
offset = nd_label_offset(ndd, nd_label);
rc = nvdimm_set_config_data(ndd, offset, nd_label,
- sizeof_namespace_label(ndd));
+ sizeof_namespace_label(ndd));
if (rc < 0)
goto abort;
}
@@ -1072,7 +1072,7 @@ static int __blk_label_update(struct nd_region *nd_region,
/* update index */
rc = nd_label_write_index(ndd, ndd->ns_next,
- nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
+ nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
if (rc)
goto abort;
@@ -1109,7 +1109,7 @@ static int __blk_label_update(struct nd_region *nd_region,
mutex_lock(&nd_mapping->lock);
label_ent = list_first_entry_or_null(&nd_mapping->labels,
- typeof(*label_ent), list);
+ typeof(*label_ent), list);
if (!label_ent) {
WARN_ON(1);
mutex_unlock(&nd_mapping->lock);
@@ -1133,16 +1133,16 @@ static int __blk_label_update(struct nd_region *nd_region,
}
if (nd_label)
dev_WARN(&nsblk->common.dev,
- "failed to track label slot%d\n", slot);
+ "failed to track label slot%d\n", slot);
}
mutex_unlock(&nd_mapping->lock);
- out:
+out:
kfree(old_res_list);
bitmap_free(victim_map);
return rc;
- abort:
+abort:
/*
* 1/ repair the allocated label bitmap in the index
* 2/ restore the resource list
@@ -1243,11 +1243,11 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
mutex_unlock(&nd_mapping->lock);
return nd_label_write_index(ndd, ndd->ns_next,
- nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
+ nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size)
+ struct nd_namespace_pmem *nspm, resource_size_t size)
{
int i, rc;
@@ -1274,7 +1274,7 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
return rc;
rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
- NSLABEL_FLAG_UPDATING);
+ NSLABEL_FLAG_UPDATING);
if (rc)
return rc;
}
@@ -1295,7 +1295,7 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
}
int nd_blk_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_blk *nsblk, resource_size_t size)
+ struct nd_namespace_blk *nsblk, resource_size_t size)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct resource *res;
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index 4c7b775c2811..aff33d09fec3 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -140,7 +140,7 @@ struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size);
+ struct nd_namespace_pmem *nspm, resource_size_t size);
int nd_blk_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_blk *nsblk, resource_size_t size);
+ struct nd_namespace_blk *nsblk, resource_size_t size);
#endif /* __LABEL_H__ */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index a16e52251a30..5ffa137dc963 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -122,7 +122,7 @@ bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
return false;
WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
if (device_for_each_child(&nvdimm_bus->dev, uuid,
- is_namespace_uuid_busy) != 0)
+ is_namespace_uuid_busy) != 0)
return false;
return true;
}
@@ -147,8 +147,8 @@ bool pmem_should_map_pages(struct device *dev)
nsio = to_nd_namespace_io(dev);
if (region_intersects(nsio->res.start, resource_size(&nsio->res),
- IORESOURCE_SYSTEM_RAM,
- IORES_DESC_NONE) == REGION_MIXED)
+ IORESOURCE_SYSTEM_RAM,
+ IORES_DESC_NONE) == REGION_MIXED)
return false;
return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
@@ -167,7 +167,7 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
return 4096;
else
dev_WARN(&ndns->dev, "unsupported sector size: %ld\n",
- nspm->lbasize);
+ nspm->lbasize);
}
/*
@@ -179,7 +179,7 @@ unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
EXPORT_SYMBOL(pmem_sector_size);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
- char *name)
+ char *name)
{
struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
const char *suffix = NULL;
@@ -199,16 +199,16 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
if (nsidx)
sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
- suffix ? suffix : "");
+ suffix ? suffix : "");
else
sprintf(name, "pmem%d%s", nd_region->id,
- suffix ? suffix : "");
+ suffix ? suffix : "");
} else if (is_namespace_blk(&ndns->dev)) {
struct nd_namespace_blk *nsblk;
nsblk = to_nd_namespace_blk(&ndns->dev);
sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
- suffix ? suffix : "");
+ suffix ? suffix : "");
} else {
return NULL;
}
@@ -238,7 +238,7 @@ const u8 *nd_dev_to_uuid(struct device *dev)
EXPORT_SYMBOL(nd_dev_to_uuid);
static ssize_t nstype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -247,7 +247,7 @@ static ssize_t nstype_show(struct device *dev,
static DEVICE_ATTR_RO(nstype);
static ssize_t __alt_name_store(struct device *dev, const char *buf,
- const size_t len)
+ const size_t len)
{
char *input, *pos, *alt_name, **ns_altname;
ssize_t rc;
@@ -369,10 +369,10 @@ EXPORT_SYMBOL(nd_namespace_blk_validate);
static int nd_namespace_label_update(struct nd_region *nd_region,
- struct device *dev)
+ struct device *dev)
{
dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
- "namespace must be idle during label update\n");
+ "namespace must be idle during label update\n");
if (dev->driver || to_ndns(dev)->claim)
return 0;
@@ -405,7 +405,7 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
}
static ssize_t alt_name_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
@@ -424,7 +424,7 @@ static ssize_t alt_name_store(struct device *dev,
}
static ssize_t alt_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
char *ns_altname;
@@ -444,8 +444,8 @@ static ssize_t alt_name_show(struct device *dev,
static DEVICE_ATTR_RW(alt_name);
static int scan_free(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
- resource_size_t n)
+ struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
+ resource_size_t n)
{
bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -502,7 +502,7 @@ static int scan_free(struct nd_region *nd_region,
* set.
*/
static int shrink_dpa_allocation(struct nd_region *nd_region,
- struct nd_label_id *label_id, resource_size_t n)
+ struct nd_label_id *label_id, resource_size_t n)
{
int i;
@@ -519,8 +519,8 @@ static int shrink_dpa_allocation(struct nd_region *nd_region,
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
- struct nd_region *nd_region, struct nd_mapping *nd_mapping,
- resource_size_t n)
+ struct nd_region *nd_region, struct nd_mapping *nd_mapping,
+ resource_size_t n)
{
bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -561,9 +561,9 @@ static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
* exists). If reserving PMEM any space is valid.
*/
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id, struct resource *prev,
- struct resource *next, struct resource *exist,
- resource_size_t n, struct resource *valid)
+ struct nd_label_id *label_id, struct resource *prev,
+ struct resource *next, struct resource *exist,
+ resource_size_t n, struct resource *valid)
{
bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
@@ -599,10 +599,10 @@ static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
/* allocation needs to be contiguous with the existing namespace */
if (valid->start == exist->end + 1
- || valid->end == exist->start - 1)
+ || valid->end == exist->start - 1)
return;
- invalid:
+invalid:
/* truncate @valid size to 0 */
valid->end = valid->start - 1;
}
@@ -612,8 +612,8 @@ enum alloc_loc {
};
static resource_size_t scan_allocate(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
- resource_size_t n)
+ struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
+ resource_size_t n)
{
resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
@@ -629,7 +629,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
valid.start = nd_mapping->start;
valid.end = mapping_end;
valid.name = "free space";
- retry:
+retry:
first = 0;
for_each_dpa_resource(ndd, res) {
struct resource *next = res->sibling, *new_res = NULL;
@@ -649,7 +649,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
valid.start = nd_mapping->start;
valid.end = res->start - 1;
space_valid(nd_region, ndd, label_id, NULL, next, exist,
- to_allocate, &valid);
+ to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_BEFORE;
@@ -660,7 +660,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
valid.start = res->start + resource_size(res);
valid.end = min(mapping_end, next->start - 1);
space_valid(nd_region, ndd, label_id, res, next, exist,
- to_allocate, &valid);
+ to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_MID;
@@ -671,7 +671,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
valid.start = res->start + resource_size(res);
valid.end = mapping_end;
space_valid(nd_region, ndd, label_id, res, next, exist,
- to_allocate, &valid);
+ to_allocate, &valid);
available = resource_size(&valid);
if (available)
loc = ALLOC_AFTER;
@@ -685,7 +685,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
if (strcmp(res->name, label_id->id) == 0) {
/* adjust current resource up */
rc = adjust_resource(res, res->start - allocate,
- resource_size(res) + allocate);
+ resource_size(res) + allocate);
action = "cur grow up";
} else
action = "allocate";
@@ -694,8 +694,8 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
if (strcmp(next->name, label_id->id) == 0) {
/* adjust next resource up */
rc = adjust_resource(next, next->start
- - allocate, resource_size(next)
- + allocate);
+ - allocate, resource_size(next)
+ + allocate);
new_res = next;
action = "next grow up";
} else if (strcmp(res->name, label_id->id) == 0) {
@@ -719,13 +719,13 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
valid.start += available - allocate;
new_res = nvdimm_allocate_dpa(ndd, label_id,
- valid.start, allocate);
+ valid.start, allocate);
if (!new_res)
rc = -EBUSY;
} else if (strcmp(action, "grow down") == 0) {
/* adjust current resource down */
rc = adjust_resource(res, res->start, resource_size(res)
- + allocate);
+ + allocate);
if (rc == 0)
res->flags |= DPA_RESOURCE_ADJUSTED;
}
@@ -734,7 +734,7 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
new_res = res;
nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
- action, loc, rc);
+ action, loc, rc);
if (rc)
return n;
@@ -764,22 +764,22 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
}
static int merge_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
+ struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
if (strncmp("pmem", label_id->id, 4) == 0)
return 0;
- retry:
+retry:
for_each_dpa_resource(ndd, res) {
int rc;
struct resource *next = res->sibling;
resource_size_t end = res->start + resource_size(res);
if (!next || strcmp(res->name, label_id->id) != 0
- || strcmp(next->name, label_id->id) != 0
- || end != next->start)
+ || strcmp(next->name, label_id->id) != 0
+ || end != next->start)
continue;
end += resource_size(next);
nvdimm_free_dpa(ndd, next);
@@ -822,9 +822,9 @@ int __reserve_free_pmem(struct device *dev, void *data)
return 0;
rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
dev_WARN_ONCE(&nd_region->dev, rem,
- "pmem reserve underrun: %#llx of %#llx bytes\n",
- (unsigned long long) n - rem,
- (unsigned long long) n);
+ "pmem reserve underrun: %#llx of %#llx bytes\n",
+ (unsigned long long) n - rem,
+ (unsigned long long) n);
return rem ? -ENXIO : 0;
}
@@ -832,7 +832,7 @@ int __reserve_free_pmem(struct device *dev, void *data)
}
void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
- struct nd_mapping *nd_mapping)
+ struct nd_mapping *nd_mapping)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res, *_res;
@@ -843,13 +843,13 @@ void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
}
static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
- struct nd_mapping *nd_mapping)
+ struct nd_mapping *nd_mapping)
{
struct nvdimm *nvdimm = nd_mapping->nvdimm;
int rc;
rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
- __reserve_free_pmem);
+ __reserve_free_pmem);
if (rc)
release_free_pmem(nvdimm_bus, nd_mapping);
return rc;
@@ -869,7 +869,7 @@ static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
* first.
*/
static int grow_dpa_allocation(struct nd_region *nd_region,
- struct nd_label_id *label_id, resource_size_t n)
+ struct nd_label_id *label_id, resource_size_t n)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
@@ -893,7 +893,7 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
return rc;
}
rem = scan_allocate(nd_region, nd_mapping,
- label_id, rem);
+ label_id, rem);
if (blk_only)
release_free_pmem(nvdimm_bus, nd_mapping);
@@ -903,9 +903,9 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
}
dev_WARN_ONCE(&nd_region->dev, rem,
- "allocation underrun: %#llx of %#llx bytes\n",
- (unsigned long long) n - rem,
- (unsigned long long) n);
+ "allocation underrun: %#llx of %#llx bytes\n",
+ (unsigned long long) n - rem,
+ (unsigned long long) n);
if (rem)
return -ENXIO;
@@ -918,7 +918,7 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
}
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size)
+ struct nd_namespace_pmem *nspm, resource_size_t size)
{
struct resource *res = &nspm->nsio.res;
resource_size_t offset = 0;
@@ -953,7 +953,7 @@ static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
size = 0;
}
- out:
+out:
res->start = nd_region->ndr_start + offset;
res->end = res->start + size - 1;
}
@@ -1009,7 +1009,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
if (remainder) {
dev_dbg(dev, "%llu is not %dK aligned\n", val,
- (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
+ (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
return -EINVAL;
}
@@ -1039,7 +1039,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
allocated = div_u64(allocated, nd_region->ndr_mappings);
if (val < allocated)
rc = shrink_dpa_allocation(nd_region, &label_id,
- allocated - val);
+ allocated - val);
else
rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
@@ -1050,7 +1050,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
nd_namespace_pmem_set_resource(nd_region, nspm,
- val * nd_region->ndr_mappings);
+ val * nd_region->ndr_mappings);
}
/*
@@ -1066,7 +1066,7 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
}
static ssize_t size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
unsigned long long val;
@@ -1160,10 +1160,10 @@ bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
EXPORT_SYMBOL(nvdimm_namespace_locked);
static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%llu\n", (unsigned long long)
- nvdimm_namespace_capacity(to_ndns(dev)));
+ nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);
@@ -1182,7 +1182,7 @@ static u8 *namespace_to_uuid(struct device *dev)
}
static ssize_t uuid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
u8 *uuid = namespace_to_uuid(dev);
@@ -1201,7 +1201,7 @@ static ssize_t uuid_show(struct device *dev,
* @old_uuid: reference to the uuid storage location in the namespace object
*/
static int namespace_update_uuid(struct nd_region *nd_region,
- struct device *dev, u8 *new_uuid, u8 **old_uuid)
+ struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
struct nd_label_id old_label_id;
@@ -1245,7 +1245,7 @@ static int namespace_update_uuid(struct nd_region *nd_region,
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, old_label_id.id) == 0)
sprintf((void *) res->name, "%s",
- new_label_id.id);
+ new_label_id.id);
mutex_lock(&nd_mapping->lock);
list_for_each_entry(label_ent, &nd_mapping->labels, list) {
@@ -1262,13 +1262,13 @@ static int namespace_update_uuid(struct nd_region *nd_region,
mutex_unlock(&nd_mapping->lock);
}
kfree(*old_uuid);
- out:
+out:
*old_uuid = new_uuid;
return 0;
}
static ssize_t uuid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
u8 *uuid = NULL;
@@ -1300,7 +1300,7 @@ static ssize_t uuid_store(struct device *dev,
else
kfree(uuid);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1309,7 +1309,7 @@ static ssize_t uuid_store(struct device *dev,
static DEVICE_ATTR_RW(uuid);
static ssize_t resource_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct resource *res;
@@ -1332,31 +1332,31 @@ static ssize_t resource_show(struct device *dev,
static DEVICE_ATTR_RO(resource);
static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
- 4096, 4104, 4160, 4224, 0 };
+ 4096, 4104, 4160, 4224, 0 };
static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
static ssize_t sector_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
if (is_namespace_blk(dev)) {
struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
return nd_size_select_show(nsblk->lbasize,
- blk_lbasize_supported, buf);
+ blk_lbasize_supported, buf);
}
if (is_namespace_pmem(dev)) {
struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
return nd_size_select_show(nspm->lbasize,
- pmem_lbasize_supported, buf);
+ pmem_lbasize_supported, buf);
}
return -ENXIO;
}
static ssize_t sector_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
const unsigned long *supported;
@@ -1385,7 +1385,7 @@ static ssize_t sector_size_store(struct device *dev,
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
- buf, buf[len - 1] == '\n' ? "" : "\n");
+ buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1394,7 +1394,7 @@ static ssize_t sector_size_store(struct device *dev,
static DEVICE_ATTR_RW(sector_size);
static ssize_t dpa_extents_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
struct nd_label_id label_id;
@@ -1428,7 +1428,7 @@ static ssize_t dpa_extents_show(struct device *dev,
if (strcmp(res->name, label_id.id) == 0)
count++;
}
- out:
+out:
nvdimm_bus_unlock(dev);
return sprintf(buf, "%d\n", count);
@@ -1460,7 +1460,7 @@ static int btt_claim_class(struct device *dev)
else {
/* check whether existing labels are v1.1 or v1.2 */
if (__le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ && __le16_to_cpu(nsindex->minor) == 1)
loop_bitmask |= 2;
else
loop_bitmask |= 4;
@@ -1497,7 +1497,7 @@ static int btt_claim_class(struct device *dev)
}
static ssize_t holder_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
@@ -1536,7 +1536,7 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
}
static ssize_t holder_class_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
@@ -1555,7 +1555,7 @@ static ssize_t holder_class_store(struct device *dev,
}
static ssize_t holder_class_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
ssize_t rc;
@@ -1564,7 +1564,7 @@ static ssize_t holder_class_show(struct device *dev,
if (ndns->claim_class == NVDIMM_CCLASS_NONE)
rc = sprintf(buf, "\n");
else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
- (ndns->claim_class == NVDIMM_CCLASS_BTT2))
+ (ndns->claim_class == NVDIMM_CCLASS_BTT2))
rc = sprintf(buf, "btt\n");
else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
rc = sprintf(buf, "pfn\n");
@@ -1579,7 +1579,7 @@ static ssize_t holder_class_show(struct device *dev,
static DEVICE_ATTR_RW(holder_class);
static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_namespace_common *ndns = to_ndns(dev);
struct device *claim;
@@ -1606,7 +1606,7 @@ static ssize_t mode_show(struct device *dev,
static DEVICE_ATTR_RO(mode);
static ssize_t force_raw_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
bool force_raw;
int rc = strtobool(buf, &force_raw);
@@ -1619,7 +1619,7 @@ static ssize_t force_raw_store(struct device *dev,
}
static ssize_t force_raw_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
@@ -1641,7 +1641,7 @@ static struct attribute *nd_namespace_attributes[] = {
};
static umode_t namespace_visible(struct kobject *kobj,
- struct attribute *a, int n)
+ struct attribute *a, int n)
{
struct device *dev = container_of(kobj, struct device, kobj);
@@ -1659,10 +1659,10 @@ static umode_t namespace_visible(struct kobject *kobj,
}
if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
- || a == &dev_attr_holder.attr
- || a == &dev_attr_holder_class.attr
- || a == &dev_attr_force_raw.attr
- || a == &dev_attr_mode.attr)
+ || a == &dev_attr_holder.attr
+ || a == &dev_attr_holder_class.attr
+ || a == &dev_attr_force_raw.attr
+ || a == &dev_attr_mode.attr)
return a->mode;
return 0;
@@ -1707,13 +1707,13 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
nd_device_unlock(&ndns->dev);
if (ndns->dev.driver) {
dev_dbg(&ndns->dev, "is active, can't bind %s\n",
- dev_name(dev));
+ dev_name(dev));
return ERR_PTR(-EBUSY);
}
if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
- "host (%s) vs claim (%s) mismatch\n",
- dev_name(dev),
- dev_name(ndns->claim)))
+ "host (%s) vs claim (%s) mismatch\n",
+ dev_name(dev),
+ dev_name(ndns->claim)))
return ERR_PTR(-ENXIO);
} else {
ndns = to_ndns(dev);
@@ -1731,7 +1731,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
size = nvdimm_namespace_capacity(ndns);
if (size < ND_MIN_NAMESPACE_SIZE) {
dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
- &size, ND_MIN_NAMESPACE_SIZE);
+ &size, ND_MIN_NAMESPACE_SIZE);
return ERR_PTR(-ENODEV);
}
@@ -1789,7 +1789,7 @@ static struct device **create_namespace_io(struct nd_region *nd_region)
}
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
- u64 cookie, u16 pos)
+ u64 cookie, u16 pos)
{
struct nd_namespace_label *found = NULL;
int i;
@@ -1819,11 +1819,11 @@ static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
continue;
if (namespace_label_has(ndd, type_guid)
- && !guid_equal(&nd_set->type_guid,
- &nd_label->type_guid)) {
+ && !guid_equal(&nd_set->type_guid,
+ &nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
+ &nd_set->type_guid,
+ &nd_label->type_guid);
continue;
}
@@ -1883,11 +1883,11 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
pmem_start = __le64_to_cpu(nd_label->dpa);
pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
if (pmem_start >= hw_start && pmem_start < hw_end
- && pmem_end <= hw_end && pmem_end > hw_start)
+ && pmem_end <= hw_end && pmem_end > hw_start)
/* pass */;
else {
dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
- dev_name(ndd->dev), nd_label->uuid);
+ dev_name(ndd->dev), nd_label->uuid);
return -EINVAL;
}
@@ -1904,8 +1904,8 @@ static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
* @nd_label: target pmem namespace label to evaluate
*/
static struct device *create_namespace_pmem(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex,
- struct nd_namespace_label *nd_label)
+ struct nd_namespace_index *nsindex,
+ struct nd_namespace_label *nd_label)
{
u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
@@ -1925,12 +1925,12 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
- nd_label->uuid);
+ nd_label->uuid);
if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
return ERR_PTR(-EAGAIN);
dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
- nd_label->uuid);
+ nd_label->uuid);
}
nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
@@ -1962,7 +1962,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
* find a dimm with two instances of the same uuid.
*/
dev_err(&nd_region->dev, "%s missing label for %pUb\n",
- nvdimm_name(nvdimm), nd_label->uuid);
+ nvdimm_name(nvdimm), nd_label->uuid);
rc = -EINVAL;
goto err;
}
@@ -1986,7 +1986,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
nd_mapping = &nd_region->mapping[i];
label_ent = list_first_entry_or_null(&nd_mapping->labels,
- typeof(*label_ent), list);
+ typeof(*label_ent), list);
label0 = label_ent ? label_ent->label : 0;
if (!label0) {
@@ -1999,9 +1999,9 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
continue;
WARN_ON(nspm->alt_name || nspm->uuid);
nspm->alt_name = kmemdup((void __force *) label0->name,
- NSLABEL_NAME_LEN, GFP_KERNEL);
+ NSLABEL_NAME_LEN, GFP_KERNEL);
nspm->uuid = kmemdup((void __force *) label0->uuid,
- NSLABEL_UUID_LEN, GFP_KERNEL);
+ NSLABEL_UUID_LEN, GFP_KERNEL);
nspm->lbasize = __le64_to_cpu(label0->lbasize);
ndd = to_ndd(nd_mapping);
if (namespace_label_has(ndd, abstraction_guid))
@@ -2018,7 +2018,7 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
nd_namespace_pmem_set_resource(nd_region, nspm, size);
return dev;
- err:
+err:
namespace_pmem_release(dev);
switch (rc) {
case -EINVAL:
@@ -2035,22 +2035,22 @@ static struct device *create_namespace_pmem(struct nd_region *nd_region,
}
struct resource *nsblk_add_resource(struct nd_region *nd_region,
- struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
- resource_size_t start)
+ struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
+ resource_size_t start)
{
struct nd_label_id label_id;
struct resource *res;
nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
res = krealloc(nsblk->res,
- sizeof(void *) * (nsblk->num_resources + 1),
- GFP_KERNEL);
+ sizeof(void *) * (nsblk->num_resources + 1),
+ GFP_KERNEL);
if (!res)
return NULL;
nsblk->res = (struct resource **) res;
for_each_dpa_resource(ndd, res)
if (strcmp(res->name, label_id.id) == 0
- && res->start == start) {
+ && res->start == start) {
nsblk->res[nsblk->num_resources++] = res;
return res;
}
@@ -2133,7 +2133,7 @@ void nd_region_create_ns_seed(struct nd_region *nd_region)
*/
if (!nd_region->ns_seed)
dev_err(&nd_region->dev, "failed to create %s namespace\n",
- is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
+ is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
else
nd_device_register(nd_region->ns_seed);
}
@@ -2175,8 +2175,8 @@ void nd_region_create_btt_seed(struct nd_region *nd_region)
}
static int add_namespace_resource(struct nd_region *nd_region,
- struct nd_namespace_label *nd_label, struct device **devs,
- int count)
+ struct nd_namespace_label *nd_label, struct device **devs,
+ int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -2195,15 +2195,15 @@ static int add_namespace_resource(struct nd_region *nd_region,
continue;
if (is_namespace_blk(devs[i])) {
res = nsblk_add_resource(nd_region, ndd,
- to_nd_namespace_blk(devs[i]),
- __le64_to_cpu(nd_label->dpa));
+ to_nd_namespace_blk(devs[i]),
+ __le64_to_cpu(nd_label->dpa));
if (!res)
return -ENXIO;
nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
} else {
dev_err(&nd_region->dev,
- "error: conflicting extents for uuid: %pUb\n",
- nd_label->uuid);
+ "error: conflicting extents for uuid: %pUb\n",
+ nd_label->uuid);
return -ENXIO;
}
break;
@@ -2213,7 +2213,7 @@ static int add_namespace_resource(struct nd_region *nd_region,
}
static struct device *create_namespace_blk(struct nd_region *nd_region,
- struct nd_namespace_label *nd_label, int count)
+ struct nd_namespace_label *nd_label, int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
@@ -2227,15 +2227,15 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
if (namespace_label_has(ndd, type_guid)) {
if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
- &nd_set->type_guid,
- &nd_label->type_guid);
+ &nd_set->type_guid,
+ &nd_label->type_guid);
return ERR_PTR(-EAGAIN);
}
if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
- nd_set->cookie2,
- __le64_to_cpu(nd_label->isetcookie));
+ nd_set->cookie2,
+ __le64_to_cpu(nd_label->isetcookie));
return ERR_PTR(-EAGAIN);
}
}
@@ -2249,7 +2249,7 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
nsblk->id = -1;
nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
- GFP_KERNEL);
+ GFP_KERNEL);
if (namespace_label_has(ndd, abstraction_guid))
nsblk->common.claim_class
= to_nvdimm_cclass(&nd_label->abstraction_guid);
@@ -2258,17 +2258,17 @@ static struct device *create_namespace_blk(struct nd_region *nd_region,
memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
if (name[0]) {
nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
- GFP_KERNEL);
+ GFP_KERNEL);
if (!nsblk->alt_name)
goto blk_err;
}
res = nsblk_add_resource(nd_region, ndd, nsblk,
- __le64_to_cpu(nd_label->dpa));
+ __le64_to_cpu(nd_label->dpa));
if (!res)
goto blk_err;
nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
return dev;
- blk_err:
+blk_err:
namespace_blk_release(dev);
return ERR_PTR(-ENXIO);
}
@@ -2288,14 +2288,14 @@ static int cmp_dpa(const void *a, const void *b)
nsblk_b = to_nd_namespace_blk(dev_b);
return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
- sizeof(resource_size_t));
+ sizeof(resource_size_t));
}
nspm_a = to_nd_namespace_pmem(dev_a);
nspm_b = to_nd_namespace_pmem(dev_b);
return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
- sizeof(resource_size_t));
+ sizeof(resource_size_t));
}
static struct device **scan_labels(struct nd_region *nd_region)
@@ -2316,7 +2316,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
flags = __le32_to_cpu(nd_label->flags);
if (is_nd_blk(&nd_region->dev)
- == !!(flags & NSLABEL_FLAG_LOCAL))
+ == !!(flags & NSLABEL_FLAG_LOCAL))
/* pass, region matches label type */;
else
continue;
@@ -2364,8 +2364,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
}
dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
- count, is_nd_blk(&nd_region->dev)
- ? "blk" : "pmem", count == 1 ? "" : "s");
+ count, is_nd_blk(&nd_region->dev)
+ ? "blk" : "pmem", count == 1 ? "" : "s");
if (count == 0) {
/* Publish a zero-sized namespace for userspace to configure. */
@@ -2423,7 +2423,7 @@ static struct device **scan_labels(struct nd_region *nd_region)
return devs;
- err:
+err:
if (devs) {
for (i = 0; devs[i]; i++)
if (is_nd_blk(&nd_region->dev))
@@ -2486,9 +2486,9 @@ static int init_active_labels(struct nd_region *nd_region)
return 0;
dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
- dev_name(&nd_mapping->nvdimm->dev),
- test_bit(NDD_LOCKED, &nvdimm->flags)
- ? "locked" : "disabled");
+ dev_name(&nd_mapping->nvdimm->dev),
+ test_bit(NDD_LOCKED, &nvdimm->flags)
+ ? "locked" : "disabled");
return -ENXIO;
}
nd_mapping->ndd = ndd;
@@ -2570,14 +2570,14 @@ int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
nsblk = to_nd_namespace_blk(dev);
id = ida_simple_get(&nd_region->ns_ida, 0, 0,
- GFP_KERNEL);
+ GFP_KERNEL);
nsblk->id = id;
} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
struct nd_namespace_pmem *nspm;
nspm = to_nd_namespace_pmem(dev);
id = ida_simple_get(&nd_region->ns_ida, 0, 0,
- GFP_KERNEL);
+ GFP_KERNEL);
nspm->id = id;
} else
id = i;
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 454454ba1738..60525ff1f19f 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -48,7 +48,7 @@ struct nvdimm {
};
static inline unsigned long nvdimm_security_flags(
- struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
+ struct nvdimm *nvdimm, enum nvdimm_passphrase_type ptype)
{
u64 flags;
const u64 state_flags = 1UL << NVDIMM_SECURITY_DISABLED
@@ -62,8 +62,8 @@ static inline unsigned long nvdimm_security_flags(
flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
/* disabled, locked, unlocked, and overwrite are mutually exclusive */
dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
- "reported invalid security state: %#llx\n",
- (unsigned long long) flags);
+ "reported invalid security state: %#llx\n",
+ (unsigned long long) flags);
return flags;
}
int nvdimm_security_freeze(struct nvdimm *nvdimm);
@@ -72,7 +72,7 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len);
void nvdimm_security_overwrite_query(struct work_struct *work);
#else
static inline ssize_t nvdimm_security_store(struct device *dev,
- const char *buf, size_t len)
+ const char *buf, size_t len)
{
return -EOPNOTSUPP;
}
@@ -146,29 +146,29 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, resource_size_t *overlap);
+ struct nd_mapping *nd_mapping, resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size);
+ resource_size_t size);
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id);
+ struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
- struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
- resource_size_t start);
+ struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
+ resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
void nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
void __nd_detach_ndns(struct device *dev, struct nd_namespace_common **_ndns);
bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns);
+ struct nd_namespace_common **_ndns);
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
- struct nd_namespace_common **_ndns);
+ struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
- struct nd_namespace_common **_ndns, const char *buf,
- size_t len);
+ struct nd_namespace_common **_ndns, const char *buf,
+ size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index 1b9955651379..56ffd998d642 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -43,7 +43,7 @@ struct nd_region_data {
};
static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
- int dimm, int hint)
+ int dimm, int hint)
{
unsigned int num = 1 << ndrd->hints_shift;
unsigned int mask = num - 1;
@@ -52,7 +52,7 @@ static inline void __iomem *ndrd_get_flush_wpq(struct nd_region_data *ndrd,
}
static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
- int hint, void __iomem *flush)
+ int hint, void __iomem *flush)
{
unsigned int num = 1 << ndrd->hints_shift;
unsigned int mask = num - 1;
@@ -61,7 +61,7 @@ static inline void ndrd_set_flush_wpq(struct nd_region_data *ndrd, int dimm,
}
static inline struct nd_namespace_index *to_namespace_index(
- struct nvdimm_drvdata *ndd, int i)
+ struct nvdimm_drvdata *ndd, int i)
{
if (i < 0)
return NULL;
@@ -70,35 +70,35 @@ static inline struct nd_namespace_index *to_namespace_index(
}
static inline struct nd_namespace_index *to_current_namespace_index(
- struct nvdimm_drvdata *ndd)
+ struct nvdimm_drvdata *ndd)
{
return to_namespace_index(ndd, ndd->ns_current);
}
static inline struct nd_namespace_index *to_next_namespace_index(
- struct nvdimm_drvdata *ndd)
+ struct nvdimm_drvdata *ndd)
{
return to_namespace_index(ndd, ndd->ns_next);
}
unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
-#define namespace_label_has(ndd, field) \
- (offsetof(struct nd_namespace_label, field) \
- < sizeof_namespace_label(ndd))
+#define namespace_label_has(ndd, field) \
+ (offsetof(struct nd_namespace_label, field) \
+ < sizeof_namespace_label(ndd))
-#define nd_dbg_dpa(r, d, res, fmt, arg...) \
+#define nd_dbg_dpa(r, d, res, fmt, arg...) \
dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
- (unsigned long long) (res ? resource_size(res) : 0), \
+ (unsigned long long) (res ? resource_size(res) : 0), \
(unsigned long long) (res ? res->start : 0), ##arg)
-#define for_each_dpa_resource(ndd, res) \
+#define for_each_dpa_resource(ndd, res) \
for (res = (ndd)->dpa.child; res; res = res->sibling)
-#define for_each_dpa_resource_safe(ndd, res, next) \
- for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
- res; res = next, next = next ? next->sibling : NULL)
+#define for_each_dpa_resource_safe(ndd, res, next) \
+ for (res = (ndd)->dpa.child, next = res ? res->sibling : NULL; \
+ res; res = next, next = next ? next->sibling : NULL)
struct nd_percpu_lane {
int count;
@@ -162,7 +162,7 @@ struct nd_region {
struct nd_blk_region {
int (*enable)(struct nvdimm_bus *nvdimm_bus, struct device *dev);
int (*do_io)(struct nd_blk_region *ndbr, resource_size_t dpa,
- void *iobuf, u64 len, int rw);
+ void *iobuf, u64 len, int rw);
void *blk_provider_data;
struct nd_region nd_region;
};
@@ -223,11 +223,11 @@ void nd_device_register(struct device *dev);
void nd_device_unregister(struct device *dev, enum nd_async_mode mode);
void nd_device_notify(struct device *dev, enum nvdimm_event event);
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
- size_t len);
+ size_t len);
ssize_t nd_size_select_show(unsigned long current_size,
- const unsigned long *supported, char *buf);
+ const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
- unsigned long *current_size, const unsigned long *supported);
+ unsigned long *current_size, const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
@@ -241,9 +241,9 @@ int nvdimm_init_config_data(struct nvdimm_drvdata *ndd);
int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
size_t offset, size_t len);
int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
- void *buf, size_t len);
+ void *buf, size_t len);
long nvdimm_clear_poison(struct device *dev, phys_addr_t phys,
- unsigned int len);
+ unsigned int len);
void nvdimm_set_aliasing(struct device *dev);
void nvdimm_set_locked(struct device *dev);
void nvdimm_clear_locked(struct device *dev);
@@ -270,7 +270,7 @@ bool is_nd_btt(struct device *dev);
struct device *nd_btt_create(struct nd_region *nd_region);
#else
static inline int nd_btt_probe(struct device *dev,
- struct nd_namespace_common *ndns)
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -299,12 +299,12 @@ int nd_pfn_probe(struct device *dev, struct nd_namespace_common *ndns);
bool is_nd_pfn(struct device *dev);
struct device *nd_pfn_create(struct nd_region *nd_region);
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
- struct nd_namespace_common *ndns);
+ struct nd_namespace_common *ndns);
int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig);
extern struct attribute_group nd_pfn_attribute_group;
#else
static inline int nd_pfn_probe(struct device *dev,
- struct nd_namespace_common *ndns)
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -332,7 +332,7 @@ bool is_nd_dax(struct device *dev);
struct device *nd_dax_create(struct nd_region *nd_region);
#else
static inline int nd_dax_probe(struct device *dev,
- struct nd_namespace_common *ndns)
+ struct nd_namespace_common *ndns)
{
return -ENODEV;
}
@@ -351,7 +351,7 @@ static inline struct device *nd_dax_create(struct nd_region *nd_region)
int nd_region_to_nstype(struct nd_region *nd_region);
int nd_region_register_namespaces(struct nd_region *nd_region, int *err);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex);
+ struct nd_namespace_index *nsindex);
u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region);
void nvdimm_bus_lock(struct device *dev);
void nvdimm_bus_unlock(struct device *dev);
@@ -362,18 +362,18 @@ void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id, resource_size_t start,
- resource_size_t n);
+ struct nd_label_id *label_id, resource_size_t start,
+ resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev);
int nvdimm_namespace_attach_btt(struct nd_namespace_common *ndns);
int nvdimm_namespace_detach_btt(struct nd_btt *nd_btt);
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
- char *name);
+ char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res);
+ struct badblocks *bb, const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
@@ -385,12 +385,12 @@ static inline int nvdimm_setup_pfn(struct nd_pfn *nd_pfn,
return -ENXIO;
}
static inline int devm_nsio_enable(struct device *dev,
- struct nd_namespace_io *nsio)
+ struct nd_namespace_io *nsio)
{
return -ENXIO;
}
static inline void devm_nsio_disable(struct device *dev,
- struct nd_namespace_io *nsio)
+ struct nd_namespace_io *nsio)
{
}
#endif
@@ -416,14 +416,14 @@ static inline void nd_iostat_end(struct bio *bio, unsigned long start)
generic_end_io_acct(disk->queue, bio_op(bio), &disk->part0, start);
}
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
- unsigned int len)
+ unsigned int len)
{
if (bb->count) {
sector_t first_bad;
int num_bad;
return !!badblocks_check(bb, sector, len / 512, &first_bad,
- &num_bad);
+ &num_bad);
}
return false;
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index 10351d5b49fa..e4f553633759 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -9,7 +9,7 @@
#include "virtio_pmem.h"
#include "nd.h"
- /* The interrupt handler */
+/* The interrupt handler */
void virtio_pmem_host_ack(struct virtqueue *vq)
{
struct virtio_pmem *vpmem = vq->vdev->priv;
@@ -24,7 +24,7 @@ void virtio_pmem_host_ack(struct virtqueue *vq)
if (!list_empty(&vpmem->req_list)) {
req_buf = list_first_entry(&vpmem->req_list,
- struct virtio_pmem_request, list);
+ struct virtio_pmem_request, list);
req_buf->wq_buf_avail = true;
wake_up(&req_buf->wq_buf);
list_del(&req_buf->list);
@@ -34,7 +34,7 @@ void virtio_pmem_host_ack(struct virtqueue *vq)
}
EXPORT_SYMBOL_GPL(virtio_pmem_host_ack);
- /* The request submission function */
+/* The request submission function */
static int virtio_pmem_flush(struct nd_region *nd_region)
{
struct virtio_device *vdev = nd_region->provider_data;
@@ -60,12 +60,12 @@ static int virtio_pmem_flush(struct nd_region *nd_region)
sgs[1] = &ret;
spin_lock_irqsave(&vpmem->pmem_lock, flags);
- /*
- * If virtqueue_add_sgs returns -ENOSPC then req_vq virtual
- * queue does not have free descriptor. We add the request
- * to req_list and wait for host_ack to wake us up when free
- * slots are available.
- */
+ /*
+ * If virtqueue_add_sgs returns -ENOSPC then req_vq virtual
+ * queue does not have free descriptor. We add the request
+ * to req_list and wait for host_ack to wake us up when free
+ * slots are available.
+ */
while ((err = virtqueue_add_sgs(vpmem->req_vq, sgs, 1, 1, req_data,
GFP_ATOMIC)) == -ENOSPC) {
diff --git a/drivers/nvdimm/of_pmem.c b/drivers/nvdimm/of_pmem.c
index 97187d6c0bdb..03ffcbf601d4 100644
--- a/drivers/nvdimm/of_pmem.c
+++ b/drivers/nvdimm/of_pmem.c
@@ -55,7 +55,7 @@ static int of_pmem_region_probe(struct platform_device *pdev)
is_volatile = !!of_find_property(np, "volatile", NULL);
dev_dbg(&pdev->dev, "Registering %s regions from %pOF\n",
- is_volatile ? "volatile" : "non-volatile", np);
+ is_volatile ? "volatile" : "non-volatile", np);
for (i = 0; i < pdev->num_resources; i++) {
struct nd_region_desc ndr_desc;
@@ -80,10 +80,10 @@ static int of_pmem_region_probe(struct platform_device *pdev)
if (!region)
dev_warn(&pdev->dev, "Unable to register region %pR from %pOF\n",
- ndr_desc.res, np);
+ ndr_desc.res, np);
else
dev_dbg(&pdev->dev, "Registered region %pR from %pOF\n",
- ndr_desc.res, np);
+ ndr_desc.res, np);
}
return 0;
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index cb98b8fe786e..354ec83f0081 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -47,7 +47,7 @@ struct nd_pfn *to_nd_pfn(struct device *dev)
EXPORT_SYMBOL(to_nd_pfn);
static ssize_t mode_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
@@ -62,7 +62,7 @@ static ssize_t mode_show(struct device *dev,
}
static ssize_t mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
@@ -75,19 +75,19 @@ static ssize_t mode_store(struct device *dev,
size_t n = len - 1;
if (strncmp(buf, "pmem\n", n) == 0
- || strncmp(buf, "pmem", n) == 0) {
+ || strncmp(buf, "pmem", n) == 0) {
nd_pfn->mode = PFN_MODE_PMEM;
} else if (strncmp(buf, "ram\n", n) == 0
- || strncmp(buf, "ram", n) == 0)
+ || strncmp(buf, "ram", n) == 0)
nd_pfn->mode = PFN_MODE_RAM;
else if (strncmp(buf, "none\n", n) == 0
- || strncmp(buf, "none", n) == 0)
+ || strncmp(buf, "none", n) == 0)
nd_pfn->mode = PFN_MODE_NONE;
else
rc = -EINVAL;
}
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -96,7 +96,7 @@ static ssize_t mode_store(struct device *dev,
static DEVICE_ATTR_RW(mode);
static ssize_t align_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
@@ -127,7 +127,7 @@ static const unsigned long *nd_pfn_supported_alignments(void)
}
static ssize_t align_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -135,9 +135,9 @@ static ssize_t align_store(struct device *dev,
nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
- nd_pfn_supported_alignments());
+ nd_pfn_supported_alignments());
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -146,7 +146,7 @@ static ssize_t align_store(struct device *dev,
static DEVICE_ATTR_RW(align);
static ssize_t uuid_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
@@ -156,7 +156,7 @@ static ssize_t uuid_show(struct device *dev,
}
static ssize_t uuid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -164,7 +164,7 @@ static ssize_t uuid_store(struct device *dev,
nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nd_device_unlock(dev);
return rc ? rc : len;
@@ -172,20 +172,20 @@ static ssize_t uuid_store(struct device *dev,
static DEVICE_ATTR_RW(uuid);
static ssize_t namespace_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
rc = sprintf(buf, "%s\n", nd_pfn->ndns
- ? dev_name(&nd_pfn->ndns->dev) : "");
+ ? dev_name(&nd_pfn->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -194,7 +194,7 @@ static ssize_t namespace_store(struct device *dev,
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -203,7 +203,7 @@ static ssize_t namespace_store(struct device *dev,
static DEVICE_ATTR_RW(namespace);
static ssize_t resource_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -217,7 +217,7 @@ static ssize_t resource_show(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%#llx\n", (unsigned long long) nsio->res.start
- + start_pad + offset);
+ + start_pad + offset);
} else {
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
@@ -229,7 +229,7 @@ static ssize_t resource_show(struct device *dev,
static DEVICE_ATTR_RO(resource);
static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -244,8 +244,8 @@ static ssize_t size_show(struct device *dev,
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
rc = sprintf(buf, "%llu\n", (unsigned long long)
- resource_size(&nsio->res) - start_pad
- - end_trunc - offset);
+ resource_size(&nsio->res) - start_pad
+ - end_trunc - offset);
} else {
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
@@ -257,7 +257,7 @@ static ssize_t size_show(struct device *dev,
static DEVICE_ATTR_RO(size);
static ssize_t supported_alignments_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
}
@@ -294,7 +294,7 @@ static const struct attribute_group *nd_pfn_attribute_groups[] = {
};
struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
- struct nd_namespace_common *ndns)
+ struct nd_namespace_common *ndns)
{
struct device *dev;
@@ -307,7 +307,7 @@ struct device *nd_pfn_devinit(struct nd_pfn *nd_pfn,
device_initialize(&nd_pfn->dev);
if (ndns && !__nd_attach_ndns(&nd_pfn->dev, ndns, &nd_pfn->ndns)) {
dev_dbg(&ndns->dev, "failed, already claimed by %s\n",
- dev_name(ndns->claim));
+ dev_name(ndns->claim));
put_device(dev);
return NULL;
}
@@ -381,13 +381,13 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
u64 nsoff;
bb_present = badblocks_check(&nd_region->bb, meta_start,
- meta_num, &first_bad, &num_bad);
+ meta_num, &first_bad, &num_bad);
if (bb_present) {
dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
- num_bad, first_bad);
+ num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
- + (first_bad << 9)) - nsio->res.start,
- PAGE_SIZE);
+ + (first_bad << 9)) - nsio->res.start,
+ PAGE_SIZE);
zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
while (zero_len) {
unsigned long chunk = min(zero_len, PAGE_SIZE);
@@ -502,17 +502,17 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
*/
if (nd_pfn->align != align || nd_pfn->mode != mode) {
dev_err(&nd_pfn->dev,
- "init failed, settings mismatch\n");
+ "init failed, settings mismatch\n");
dev_dbg(&nd_pfn->dev, "align: %lx:%lx mode: %d:%d\n",
- nd_pfn->align, align, nd_pfn->mode,
- mode);
+ nd_pfn->align, align, nd_pfn->mode,
+ mode);
return -EINVAL;
}
}
if (align > nvdimm_namespace_capacity(ndns)) {
dev_err(&nd_pfn->dev, "alignment: %lx exceeds capacity %llx\n",
- align, nvdimm_namespace_capacity(ndns));
+ align, nvdimm_namespace_capacity(ndns));
return -EINVAL;
}
@@ -525,15 +525,15 @@ int nd_pfn_validate(struct nd_pfn *nd_pfn, const char *sig)
nsio = to_nd_namespace_io(&ndns->dev);
if (offset >= resource_size(&nsio->res)) {
dev_err(&nd_pfn->dev, "pfn array size exceeds capacity of %s\n",
- dev_name(&ndns->dev));
+ dev_name(&ndns->dev));
return -EBUSY;
}
if ((align && !IS_ALIGNED(nsio->res.start + offset + start_pad, align))
- || !IS_ALIGNED(offset, PAGE_SIZE)) {
+ || !IS_ALIGNED(offset, PAGE_SIZE)) {
dev_err(&nd_pfn->dev,
- "bad offset: %#llx dax disabled align: %#lx\n",
- offset, align);
+ "bad offset: %#llx dax disabled align: %#lx\n",
+ offset, align);
return -ENXIO;
}
@@ -635,9 +635,9 @@ static int __nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap)
nd_pfn->npfns = PHYS_PFN((resource_size(res) - offset));
if (le64_to_cpu(nd_pfn->pfn_sb->npfns) > nd_pfn->npfns)
dev_info(&nd_pfn->dev,
- "number of pfns truncated from %lld to %ld\n",
- le64_to_cpu(nd_pfn->pfn_sb->npfns),
- nd_pfn->npfns);
+ "number of pfns truncated from %lld to %ld\n",
+ le64_to_cpu(nd_pfn->pfn_sb->npfns),
+ nd_pfn->npfns);
memcpy(altmap, &__altmap, sizeof(*altmap));
altmap->free = PHYS_PFN(offset - reserve);
altmap->alloc = 0;
@@ -682,8 +682,8 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
nd_region = to_nd_region(nd_pfn->dev.parent);
if (nd_region->ro) {
dev_info(&nd_pfn->dev,
- "%s is read-only, unable to init metadata\n",
- dev_name(&nd_region->dev));
+ "%s is read-only, unable to init metadata\n",
+ dev_name(&nd_region->dev));
return -ENXIO;
}
@@ -712,7 +712,7 @@ static int nd_pfn_init(struct nd_pfn *nd_pfn)
if (offset >= size) {
dev_err(&nd_pfn->dev, "%s unable to satisfy requested alignment\n",
- dev_name(&ndns->dev));
+ dev_name(&ndns->dev));
return -ENXIO;
}
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 4c121dd03dd9..29f19db46845 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -45,7 +45,7 @@ static struct nd_region *to_region(struct pmem_device *pmem)
}
static void hwpoison_clear(struct pmem_device *pmem,
- phys_addr_t phys, unsigned int len)
+ phys_addr_t phys, unsigned int len)
{
unsigned long pfn_start, pfn_end, pfn;
@@ -69,7 +69,7 @@ static void hwpoison_clear(struct pmem_device *pmem,
}
static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
- phys_addr_t offset, unsigned int len)
+ phys_addr_t offset, unsigned int len)
{
struct device *dev = to_dev(pmem);
sector_t sector;
@@ -85,8 +85,8 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
cleared /= 512;
dev_dbg(dev, "%#llx clear %ld sector%s\n",
- (unsigned long long) sector, cleared,
- cleared > 1 ? "s" : "");
+ (unsigned long long) sector, cleared,
+ cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared);
if (pmem->bb_state)
sysfs_notify_dirent(pmem->bb_state);
@@ -98,7 +98,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
}
static void write_pmem(void *pmem_addr, struct page *page,
- unsigned int off, unsigned int len)
+ unsigned int off, unsigned int len)
{
unsigned int chunk;
void *mem;
@@ -116,7 +116,7 @@ static void write_pmem(void *pmem_addr, struct page *page,
}
static blk_status_t read_pmem(struct page *page, unsigned int off,
- void *pmem_addr, unsigned int len)
+ void *pmem_addr, unsigned int len)
{
unsigned int chunk;
unsigned long rem;
@@ -138,8 +138,8 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
}
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
- sector_t sector)
+ unsigned int len, unsigned int off, unsigned int op,
+ sector_t sector)
{
blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false;
@@ -199,7 +199,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
do_acct = nd_iostat_start(bio, &start);
bio_for_each_segment(bvec, bio, iter) {
rc = pmem_do_bvec(pmem, bvec.bv_page, bvec.bv_len,
- bvec.bv_offset, bio_op(bio), iter.bi_sector);
+ bvec.bv_offset, bio_op(bio), iter.bi_sector);
if (rc) {
bio->bi_status = rc;
break;
@@ -219,7 +219,7 @@ static blk_qc_t pmem_make_request(struct request_queue *q, struct bio *bio)
}
static int pmem_rw_page(struct block_device *bdev, sector_t sector,
- struct page *page, unsigned int op)
+ struct page *page, unsigned int op)
{
struct pmem_device *pmem = bdev->bd_queue->queuedata;
blk_status_t rc;
@@ -241,12 +241,12 @@ static int pmem_rw_page(struct block_device *bdev, sector_t sector,
/* see "strong" declaration in tools/testing/nvdimm/pmem-dax.c */
__weak long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn)
+ long nr_pages, void **kaddr, pfn_t *pfn)
{
resource_size_t offset = PFN_PHYS(pgoff) + pmem->data_offset;
if (unlikely(is_bad_pmem(&pmem->bb, PFN_PHYS(pgoff) / 512,
- PFN_PHYS(nr_pages))))
+ PFN_PHYS(nr_pages))))
return -EIO;
if (kaddr)
@@ -270,7 +270,7 @@ static const struct block_device_operations pmem_fops = {
};
static long pmem_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+ pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
@@ -284,13 +284,13 @@ static long pmem_dax_direct_access(struct dax_device *dax_dev,
* dax_iomap_actor()
*/
static size_t pmem_copy_from_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i)
{
return _copy_from_iter_flushcache(addr, bytes, i);
}
static size_t pmem_copy_to_iter(struct dax_device *dax_dev, pgoff_t pgoff,
- void *addr, size_t bytes, struct iov_iter *i)
+ void *addr, size_t bytes, struct iov_iter *i)
{
return _copy_to_iter_mcsafe(addr, bytes, i);
}
@@ -350,7 +350,7 @@ static const struct dev_pagemap_ops fsdax_pagemap_ops = {
};
static int pmem_attach_disk(struct device *dev,
- struct nd_namespace_common *ndns)
+ struct nd_namespace_common *ndns)
{
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
struct nd_region *nd_region = to_nd_region(dev->parent);
@@ -393,7 +393,7 @@ static int pmem_attach_disk(struct device *dev,
}
if (!devm_request_mem_region(dev, res->start, resource_size(res),
- dev_name(&ndns->dev))) {
+ dev_name(&ndns->dev))) {
dev_warn(dev, "could not reserve region %pR\n", res);
return -EBUSY;
}
@@ -424,10 +424,10 @@ static int pmem_attach_disk(struct device *dev,
memcpy(&bb_res, &pmem->pgmap.res, sizeof(bb_res));
} else {
if (devm_add_action_or_reset(dev, pmem_release_queue,
- &pmem->pgmap))
+ &pmem->pgmap))
return -ENOMEM;
addr = devm_memremap(dev, pmem->phys_addr,
- pmem->size, ARCH_MEMREMAP_PMEM);
+ pmem->size, ARCH_MEMREMAP_PMEM);
memcpy(&bb_res, &nsio->res, sizeof(bb_res));
}
@@ -456,7 +456,7 @@ static int pmem_attach_disk(struct device *dev,
disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
- / 512);
+ / 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
@@ -507,7 +507,7 @@ static int nd_pmem_probe(struct device *dev)
/* if we find a valid info-block we'll come back as that personality */
if (nd_btt_probe(dev, ndns) == 0 || nd_pfn_probe(dev, ndns) == 0
- || nd_dax_probe(dev, ndns) == 0)
+ || nd_dax_probe(dev, ndns) == 0)
return -ENXIO;
/* ...otherwise we're just a raw pmem device */
@@ -572,7 +572,7 @@ static void nd_pmem_notify(struct device *dev, enum nvdimm_event event)
ndns = nd_pfn->ndns;
offset = pmem->data_offset +
- __le32_to_cpu(pfn_sb->start_pad);
+ __le32_to_cpu(pfn_sb->start_pad);
end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
} else {
ndns = to_ndns(dev);
diff --git a/drivers/nvdimm/pmem.h b/drivers/nvdimm/pmem.h
index 59cfe13ea8a8..f5ba2a9a68ed 100644
--- a/drivers/nvdimm/pmem.h
+++ b/drivers/nvdimm/pmem.h
@@ -27,7 +27,7 @@ struct pmem_device {
};
long __pmem_direct_access(struct pmem_device *pmem, pgoff_t pgoff,
- long nr_pages, void **kaddr, pfn_t *pfn);
+ long nr_pages, void **kaddr, pfn_t *pfn);
#ifdef CONFIG_MEMORY_FAILURE
static inline bool test_and_clear_pmem_poison(struct page *page)
diff --git a/drivers/nvdimm/region.c b/drivers/nvdimm/region.c
index 37bf8719a2a4..8b7dbac27aea 100644
--- a/drivers/nvdimm/region.c
+++ b/drivers/nvdimm/region.c
@@ -17,13 +17,13 @@ static int nd_region_probe(struct device *dev)
struct nd_region *nd_region = to_nd_region(dev);
if (nd_region->num_lanes > num_online_cpus()
- && nd_region->num_lanes < num_possible_cpus()
- && !test_and_set_bit(0, &once)) {
+ && nd_region->num_lanes < num_possible_cpus()
+ && !test_and_set_bit(0, &once)) {
dev_dbg(dev, "online cpus (%d) < concurrent i/o lanes (%d) < possible cpus (%d)\n",
- num_online_cpus(), nd_region->num_lanes,
- num_possible_cpus());
+ num_online_cpus(), nd_region->num_lanes,
+ num_possible_cpus());
dev_dbg(dev, "setting nr_cpus=%d may yield better libnvdimm device performance\n",
- nd_region->num_lanes);
+ nd_region->num_lanes);
}
rc = nd_region_activate(nd_region);
@@ -43,7 +43,7 @@ static int nd_region_probe(struct device *dev)
"badblocks");
if (!nd_region->bb_state)
dev_warn(&nd_region->dev,
- "'badblocks' notification disabled\n");
+ "'badblocks' notification disabled\n");
ndr_res.start = nd_region->ndr_start;
ndr_res.end = nd_region->ndr_start + nd_region->ndr_size - 1;
nvdimm_badblocks_populate(nd_region, &nd_region->bb, &ndr_res);
@@ -76,7 +76,7 @@ static int nd_region_probe(struct device *dev)
* "<async-registered>/<total>" namespace count.
*/
dev_err(dev, "failed to register %d namespace%s, continuing...\n",
- err, err == 1 ? "" : "s");
+ err, err == 1 ? "" : "s");
return 0;
}
@@ -128,7 +128,7 @@ static void nd_region_notify(struct device *dev, enum nvdimm_event event)
res.end = nd_region->ndr_start +
nd_region->ndr_size - 1;
nvdimm_badblocks_populate(nd_region,
- &nd_region->bb, &res);
+ &nd_region->bb, &res);
if (nd_region->bb_state)
sysfs_notify_dirent(nd_region->bb_state);
}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index b477a8dc0020..025cd996ea58 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -23,12 +23,12 @@ static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
- struct nd_region_data *ndrd)
+ struct nd_region_data *ndrd)
{
int i, j;
dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
- nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
+ nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
for (i = 0; i < (1 << ndrd->hints_shift); i++) {
struct resource *res = &nvdimm->flush_wpq[i];
unsigned long pfn = PHYS_PFN(res->start);
@@ -45,15 +45,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
if (j < i)
flush_page = (void __iomem *) ((unsigned long)
- ndrd_get_flush_wpq(ndrd, dimm, j)
- & PAGE_MASK);
+ ndrd_get_flush_wpq(ndrd, dimm, j)
+ & PAGE_MASK);
else
flush_page = devm_nvdimm_ioremap(dev,
- PFN_PHYS(pfn), PAGE_SIZE);
+ PFN_PHYS(pfn), PAGE_SIZE);
if (!flush_page)
return -ENXIO;
ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
- + (res->start & ~PAGE_MASK));
+ + (res->start & ~PAGE_MASK));
}
return 0;
@@ -247,7 +247,7 @@ int nd_region_to_nstype(struct nd_region *nd_region)
EXPORT_SYMBOL(nd_region_to_nstype);
static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long size = 0;
@@ -265,7 +265,7 @@ static ssize_t size_show(struct device *dev,
static DEVICE_ATTR_RO(size);
static ssize_t deep_flush_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -277,7 +277,7 @@ static ssize_t deep_flush_show(struct device *dev,
}
static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t len)
+ const char *buf, size_t len)
{
bool flush;
int rc = strtobool(buf, &flush);
@@ -296,7 +296,7 @@ static ssize_t deep_flush_store(struct device *dev, struct device_attribute *att
static DEVICE_ATTR_RW(deep_flush);
static ssize_t mappings_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -305,7 +305,7 @@ static ssize_t mappings_show(struct device *dev,
static DEVICE_ATTR_RO(mappings);
static ssize_t nstype_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -314,7 +314,7 @@ static ssize_t nstype_show(struct device *dev,
static DEVICE_ATTR_RO(nstype);
static ssize_t set_cookie_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -343,8 +343,8 @@ static ssize_t set_cookie_show(struct device *dev,
nsindex = to_namespace_index(ndd, ndd->ns_current);
rc = sprintf(buf, "%#llx\n",
- nd_region_interleave_set_cookie(nd_region,
- nsindex));
+ nd_region_interleave_set_cookie(nd_region,
+ nsindex));
}
}
nvdimm_bus_unlock(dev);
@@ -363,7 +363,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
- retry:
+retry:
available = 0;
overlap = blk_max_overlap;
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -376,7 +376,7 @@ resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
if (is_memory(&nd_region->dev)) {
available += nd_pmem_available_dpa(nd_region,
- nd_mapping, &overlap);
+ nd_mapping, &overlap);
if (overlap > blk_max_overlap) {
blk_max_overlap = overlap;
goto retry;
@@ -413,7 +413,7 @@ resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
}
static ssize_t available_size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
@@ -436,7 +436,7 @@ static ssize_t available_size_show(struct device *dev,
static DEVICE_ATTR_RO(available_size);
static ssize_t max_available_extent_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
@@ -453,7 +453,7 @@ static ssize_t max_available_extent_show(struct device *dev,
static DEVICE_ATTR_RO(max_available_extent);
static ssize_t init_namespaces_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region_data *ndrd = dev_get_drvdata(dev);
ssize_t rc;
@@ -470,7 +470,7 @@ static ssize_t init_namespaces_show(struct device *dev,
static DEVICE_ATTR_RO(init_namespaces);
static ssize_t namespace_seed_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
@@ -486,7 +486,7 @@ static ssize_t namespace_seed_show(struct device *dev,
static DEVICE_ATTR_RO(namespace_seed);
static ssize_t btt_seed_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
@@ -503,7 +503,7 @@ static ssize_t btt_seed_show(struct device *dev,
static DEVICE_ATTR_RO(btt_seed);
static ssize_t pfn_seed_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
@@ -520,7 +520,7 @@ static ssize_t pfn_seed_show(struct device *dev,
static DEVICE_ATTR_RO(pfn_seed);
static ssize_t dax_seed_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
@@ -537,7 +537,7 @@ static ssize_t dax_seed_show(struct device *dev,
static DEVICE_ATTR_RO(dax_seed);
static ssize_t read_only_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -545,7 +545,7 @@ static ssize_t read_only_show(struct device *dev,
}
static ssize_t read_only_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr, const char *buf, size_t len)
{
bool ro;
int rc = strtobool(buf, &ro);
@@ -560,7 +560,7 @@ static ssize_t read_only_store(struct device *dev,
static DEVICE_ATTR_RW(read_only);
static ssize_t region_badblocks_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
ssize_t rc;
@@ -577,7 +577,7 @@ static ssize_t region_badblocks_show(struct device *dev,
static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);
static ssize_t resource_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -586,7 +586,7 @@ static ssize_t resource_show(struct device *dev,
static DEVICE_ATTR_RO(resource);
static ssize_t persistence_domain_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr, char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
@@ -655,18 +655,18 @@ static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
if (a == &dev_attr_persistence_domain.attr) {
if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
- | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
+ | BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
return 0;
return a->mode;
}
if (a != &dev_attr_set_cookie.attr
- && a != &dev_attr_available_size.attr)
+ && a != &dev_attr_available_size.attr)
return a->mode;
if ((type == ND_DEVICE_NAMESPACE_PMEM
- || type == ND_DEVICE_NAMESPACE_BLK)
- && a == &dev_attr_available_size.attr)
+ || type == ND_DEVICE_NAMESPACE_BLK)
+ && a == &dev_attr_available_size.attr)
return a->mode;
else if (is_memory(dev) && nd_set)
return a->mode;
@@ -681,7 +681,7 @@ struct attribute_group nd_region_attribute_group = {
EXPORT_SYMBOL_GPL(nd_region_attribute_group);
u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
- struct nd_namespace_index *nsindex)
+ struct nd_namespace_index *nsindex)
{
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -689,7 +689,7 @@ u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
return 0;
if (nsindex && __le16_to_cpu(nsindex->major) == 1
- && __le16_to_cpu(nsindex->minor) == 1)
+ && __le16_to_cpu(nsindex->minor) == 1)
return nd_set->cookie1;
return nd_set->cookie2;
}
@@ -721,7 +721,7 @@ void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
* disable the region.
*/
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
- struct device *dev, bool probe)
+ struct device *dev, bool probe)
{
struct nd_region *nd_region;
@@ -808,17 +808,17 @@ static ssize_t mappingN(struct device *dev, char *buf, int n)
nvdimm = nd_mapping->nvdimm;
return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
- nd_mapping->start, nd_mapping->size,
- nd_mapping->position);
+ nd_mapping->start, nd_mapping->size,
+ nd_mapping->position);
}
-#define REGION_MAPPING(idx) \
-static ssize_t mapping##idx##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
-{ \
- return mappingN(dev, buf, idx); \
-} \
-static DEVICE_ATTR_RO(mapping##idx)
+#define REGION_MAPPING(idx) \
+ static ssize_t mapping##idx##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ return mappingN(dev, buf, idx); \
+ } \
+ static DEVICE_ATTR_RO(mapping##idx)
/*
* 32 should be enough for a while, even in the presence of socket
@@ -979,8 +979,8 @@ void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
EXPORT_SYMBOL(nd_region_release_lane);
static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc, struct device_type *dev_type,
- const char *caller)
+ struct nd_region_desc *ndr_desc, struct device_type *dev_type,
+ const char *caller)
{
struct nd_region *nd_region;
struct device *dev;
@@ -994,7 +994,7 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
if ((mapping->start | mapping->size) % SZ_4K) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
- caller, dev_name(&nvdimm->dev), i);
+ caller, dev_name(&nvdimm->dev), i);
return NULL;
}
@@ -1003,9 +1003,9 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
ro = 1;
if (test_bit(NDD_NOBLK, &nvdimm->flags)
- && dev_type == &nd_blk_device_type) {
+ && dev_type == &nd_blk_device_type) {
dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not BLK capable\n",
- caller, dev_name(&nvdimm->dev), i);
+ caller, dev_name(&nvdimm->dev), i);
return NULL;
}
}
@@ -1016,8 +1016,8 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
ndbr_desc = to_blk_region_desc(ndr_desc);
ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
- * ndr_desc->num_mappings,
- GFP_KERNEL);
+ * ndr_desc->num_mappings,
+ GFP_KERNEL);
if (ndbr) {
nd_region = &ndbr->nd_region;
ndbr->enable = ndbr_desc->enable;
@@ -1091,39 +1091,39 @@ static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
return nd_region;
- err_percpu:
+err_percpu:
ida_simple_remove(&region_ida, nd_region->id);
- err_id:
+err_id:
kfree(region_buf);
return NULL;
}
struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc)
+ struct nd_region_desc *ndr_desc)
{
ndr_desc->num_lanes = ND_MAX_LANES;
return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
- __func__);
+ __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);
struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc)
+ struct nd_region_desc *ndr_desc)
{
if (ndr_desc->num_mappings > 1)
return NULL;
ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
- __func__);
+ __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);
struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
- struct nd_region_desc *ndr_desc)
+ struct nd_region_desc *ndr_desc)
{
ndr_desc->num_lanes = ND_MAX_LANES;
return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
- __func__);
+ __func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
@@ -1187,7 +1187,7 @@ int nvdimm_has_flush(struct nd_region *nd_region)
/* no nvdimm or pmem api == flushing capability unknown */
if (nd_region->ndr_mappings == 0
- || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
+ || !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
return -ENXIO;
for (i = 0; i < nd_region->ndr_mappings; i++) {
@@ -1250,7 +1250,7 @@ static int region_conflict(struct device *dev, void *data)
}
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
- resource_size_t size)
+ resource_size_t size)
{
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
struct conflict_context ctx = {
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index e32e43965e8d..be89ad78a368 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -78,7 +78,7 @@ static struct key *nvdimm_request_key(struct nvdimm *nvdimm)
}
static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
- struct key **key)
+ struct key **key)
{
*key = nvdimm_request_key(nvdimm);
if (!*key)
@@ -88,7 +88,7 @@ static const void *nvdimm_get_key_payload(struct nvdimm *nvdimm,
}
static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
- key_serial_t id, int subclass)
+ key_serial_t id, int subclass)
{
key_ref_t keyref;
struct key *key;
@@ -118,7 +118,7 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
}
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
- key_serial_t id, int subclass, struct key **key)
+ key_serial_t id, int subclass, struct key **key)
{
*key = NULL;
if (id == 0) {
@@ -174,7 +174,7 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->unlock
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
@@ -199,7 +199,7 @@ static int __nvdimm_security_unlock(struct nvdimm *nvdimm)
rc = nvdimm->sec.ops->unlock(nvdimm, data);
dev_dbg(dev, "key: %d unlock: %s\n", key_serial(key),
- rc == 0 ? "success" : "fail");
+ rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -223,7 +223,7 @@ static int check_security_state(struct nvdimm *nvdimm)
if (test_bit(NVDIMM_SECURITY_FROZEN, &nvdimm->sec.flags)) {
dev_dbg(dev, "Incorrect security state: %#lx\n",
- nvdimm->sec.flags);
+ nvdimm->sec.flags);
return -EIO;
}
@@ -247,7 +247,7 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->disable
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -255,13 +255,13 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
- NVDIMM_BASE_KEY, &key);
+ NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
rc = nvdimm->sec.ops->disable(nvdimm, data);
dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
- rc == 0 ? "success" : "fail");
+ rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -269,8 +269,8 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
}
static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
- unsigned int new_keyid,
- enum nvdimm_passphrase_type pass_type)
+ unsigned int new_keyid,
+ enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -282,7 +282,7 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->change_key
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -290,12 +290,12 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
- NVDIMM_BASE_KEY, &key);
+ NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
newdata = nvdimm_get_user_key_payload(nvdimm, new_keyid,
- NVDIMM_NEW_KEY, &newkey);
+ NVDIMM_NEW_KEY, &newkey);
if (!newdata) {
nvdimm_put_key(key);
return -ENOKEY;
@@ -303,23 +303,23 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
rc = nvdimm->sec.ops->change_key(nvdimm, data, newdata, pass_type);
dev_dbg(dev, "key: %d %d update%s: %s\n",
- key_serial(key), key_serial(newkey),
- pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
- rc == 0 ? "success" : "fail");
+ key_serial(key), key_serial(newkey),
+ pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
+ rc == 0 ? "success" : "fail");
nvdimm_put_key(newkey);
nvdimm_put_key(key);
if (pass_type == NVDIMM_MASTER)
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
- NVDIMM_MASTER);
+ NVDIMM_MASTER);
else
nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
- NVDIMM_USER);
+ NVDIMM_USER);
return rc;
}
static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
- enum nvdimm_passphrase_type pass_type)
+ enum nvdimm_passphrase_type pass_type)
{
struct device *dev = &nvdimm->dev;
struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
@@ -331,7 +331,7 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->erase
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
rc = check_security_state(nvdimm);
@@ -339,21 +339,21 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return rc;
if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags)
- && pass_type == NVDIMM_MASTER) {
+ && pass_type == NVDIMM_MASTER) {
dev_dbg(dev,
"Attempt to secure erase in wrong master state.\n");
return -EOPNOTSUPP;
}
data = nvdimm_get_user_key_payload(nvdimm, keyid,
- NVDIMM_BASE_KEY, &key);
+ NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
- pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
- rc == 0 ? "success" : "fail");
+ pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
+ rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -372,7 +372,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
lockdep_assert_held(&nvdimm_bus->reconfig_mutex);
if (!nvdimm->sec.ops || !nvdimm->sec.ops->overwrite
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return -EOPNOTSUPP;
if (dev->driver == NULL) {
@@ -385,13 +385,13 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return rc;
data = nvdimm_get_user_key_payload(nvdimm, keyid,
- NVDIMM_BASE_KEY, &key);
+ NVDIMM_BASE_KEY, &key);
if (!data)
return -ENOKEY;
rc = nvdimm->sec.ops->overwrite(nvdimm, data);
dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
- rc == 0 ? "success" : "fail");
+ rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
if (rc == 0) {
@@ -428,7 +428,7 @@ void __nvdimm_security_overwrite_query(struct nvdimm *nvdimm)
tmo = nvdimm->sec.overwrite_tmo;
if (!nvdimm->sec.ops || !nvdimm->sec.ops->query_overwrite
- || !nvdimm->sec.flags)
+ || !nvdimm->sec.flags)
return;
rc = nvdimm->sec.ops->query_overwrite(nvdimm);
@@ -466,14 +466,14 @@ void nvdimm_security_overwrite_query(struct work_struct *work)
nvdimm_bus_unlock(&nvdimm->dev);
}
-#define OPS \
- C( OP_FREEZE, "freeze", 1), \
- C( OP_DISABLE, "disable", 2), \
- C( OP_UPDATE, "update", 3), \
- C( OP_ERASE, "erase", 2), \
- C( OP_OVERWRITE, "overwrite", 2), \
- C( OP_MASTER_UPDATE, "master_update", 3), \
- C( OP_MASTER_ERASE, "master_erase", 2)
+#define OPS \
+ C( OP_FREEZE, "freeze", 1), \
+ C( OP_DISABLE, "disable", 2), \
+ C( OP_UPDATE, "update", 3), \
+ C( OP_ERASE, "erase", 2), \
+ C( OP_OVERWRITE, "overwrite", 2), \
+ C( OP_MASTER_UPDATE, "master_update", 3), \
+ C( OP_MASTER_ERASE, "master_erase", 2)
#undef C
#define C(a, b, c) a
enum nvdimmsec_op_ids { OPS };
@@ -498,9 +498,9 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
int i;
rc = sscanf(buf, "%"__stringify(SEC_CMD_SIZE)"s"
- " %"__stringify(KEY_ID_SIZE)"s"
- " %"__stringify(KEY_ID_SIZE)"s",
- cmd, keystr, nkeystr);
+ " %"__stringify(KEY_ID_SIZE)"s"
+ " %"__stringify(KEY_ID_SIZE)"s",
+ cmd, keystr, nkeystr);
if (rc < 1)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(ops); i++)
@@ -524,7 +524,7 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
} else if (i == OP_UPDATE || i == OP_MASTER_UPDATE) {
dev_dbg(dev, "%s %u %u\n", ops[i].name, key, newkey);
rc = security_update(nvdimm, key, newkey, i == OP_UPDATE
- ? NVDIMM_USER : NVDIMM_MASTER);
+ ? NVDIMM_USER : NVDIMM_MASTER);
} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
dev_dbg(dev, "%s %u\n", ops[i].name, key);
if (atomic_read(&nvdimm->busy)) {
@@ -532,7 +532,7 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
return -EBUSY;
}
rc = security_erase(nvdimm, key, i == OP_ERASE
- ? NVDIMM_USER : NVDIMM_MASTER);
+ ? NVDIMM_USER : NVDIMM_MASTER);
} else if (i == OP_OVERWRITE) {
dev_dbg(dev, "overwrite %u\n", key);
if (atomic_read(&nvdimm->busy)) {
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index 5e3d07b47e0c..ce2181e06756 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -14,12 +14,12 @@ static struct virtio_device_id id_table[] = {
{ 0 },
};
- /* Initialize virt queue */
+/* Initialize virt queue */
static int init_vq(struct virtio_pmem *vpmem)
{
/* single vq */
vpmem->req_vq = virtio_find_single_vq(vpmem->vdev,
- virtio_pmem_host_ack, "flush_queue");
+ virtio_pmem_host_ack, "flush_queue");
if (IS_ERR(vpmem->req_vq))
return PTR_ERR(vpmem->req_vq);
@@ -59,9 +59,9 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
}
virtio_cread(vpmem->vdev, struct virtio_pmem_config,
- start, &vpmem->start);
+ start, &vpmem->start);
virtio_cread(vpmem->vdev, struct virtio_pmem_config,
- size, &vpmem->size);
+ size, &vpmem->size);
res.start = vpmem->start;
res.end = vpmem->start + vpmem->size - 1;
--
2.15.0
Random neatening, mostly trivial wrapping to 80 columns, to make the
code a bit more compatible with kernel style.
Use casts to (u64) rather than (unsigned long long).
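For illustration, the cast conversion is along these lines (a minimal
sketch of the pattern, not taken from any single hunk below):

	/* before: verbose cast for a %llx argument */
	dev_dbg(dev, "start: %#llx\n", (unsigned long long)res->start);

	/* after: prefer the shorter u64 cast; in-kernel u64 is
	 * unsigned long long, so the printk format stays %llx */
	dev_dbg(dev, "start: %#llx\n", (u64)res->start);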
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/badrange.c | 3 +-
drivers/nvdimm/blk.c | 18 ++++---
drivers/nvdimm/btt.c | 22 ++++----
drivers/nvdimm/btt_devs.c | 42 +++++++++-------
drivers/nvdimm/bus.c | 25 ++++-----
drivers/nvdimm/claim.c | 11 ++--
drivers/nvdimm/core.c | 4 +-
drivers/nvdimm/dimm_devs.c | 18 ++++---
drivers/nvdimm/label.c | 35 +++++++------
drivers/nvdimm/label.h | 6 ++-
drivers/nvdimm/namespace_devs.c | 109 +++++++++++++++++++++++-----------------
drivers/nvdimm/nd-core.h | 13 ++---
drivers/nvdimm/nd.h | 26 +++++-----
drivers/nvdimm/nd_virtio.c | 3 +-
drivers/nvdimm/pfn_devs.c | 43 ++++++++--------
drivers/nvdimm/pmem.c | 14 +++---
drivers/nvdimm/region_devs.c | 36 +++++++------
drivers/nvdimm/security.c | 28 +++++------
drivers/nvdimm/virtio_pmem.c | 4 +-
19 files changed, 254 insertions(+), 206 deletions(-)
diff --git a/drivers/nvdimm/badrange.c b/drivers/nvdimm/badrange.c
index 681d99c59f52..4d231643c095 100644
--- a/drivers/nvdimm/badrange.c
+++ b/drivers/nvdimm/badrange.c
@@ -24,7 +24,8 @@ void badrange_init(struct badrange *badrange)
EXPORT_SYMBOL_GPL(badrange_init);
static void append_badrange_entry(struct badrange *badrange,
- struct badrange_entry *bre, u64 addr, u64 length)
+ struct badrange_entry *bre,
+ u64 addr, u64 length)
{
lockdep_assert_held(&badrange->lock);
bre->start = addr;
diff --git a/drivers/nvdimm/blk.c b/drivers/nvdimm/blk.c
index db3973c7f506..fc15aa9220c8 100644
--- a/drivers/nvdimm/blk.c
+++ b/drivers/nvdimm/blk.c
@@ -29,7 +29,8 @@ static u32 nsblk_sector_size(struct nd_namespace_blk *nsblk)
}
static resource_size_t to_dev_offset(struct nd_namespace_blk *nsblk,
- resource_size_t ns_offset, unsigned int len)
+ resource_size_t ns_offset,
+ unsigned int len)
{
int i;
@@ -61,7 +62,8 @@ static struct nd_blk_region *to_ndbr(struct nd_namespace_blk *nsblk)
#ifdef CONFIG_BLK_DEV_INTEGRITY
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
- struct bio_integrity_payload *bip, u64 lba, int rw)
+ struct bio_integrity_payload *bip,
+ u64 lba, int rw)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
unsigned int len = nsblk_meta_size(nsblk);
@@ -107,7 +109,8 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
#else /* CONFIG_BLK_DEV_INTEGRITY */
static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
- struct bio_integrity_payload *bip, u64 lba, int rw)
+ struct bio_integrity_payload *bip,
+ u64 lba, int rw)
{
return 0;
}
@@ -115,7 +118,8 @@ static int nd_blk_rw_integrity(struct nd_namespace_blk *nsblk,
static int nsblk_do_bvec(struct nd_namespace_blk *nsblk,
struct bio_integrity_payload *bip, struct page *page,
- unsigned int len, unsigned int off, int rw, sector_t sector)
+ unsigned int len, unsigned int off, int rw,
+ sector_t sector)
{
struct nd_blk_region *ndbr = to_ndbr(nsblk);
resource_size_t dev_offset, ns_offset;
@@ -187,9 +191,9 @@ static blk_qc_t nd_blk_make_request(struct request_queue *q, struct bio *bio)
bvec.bv_offset, rw, iter.bi_sector);
if (err) {
dev_dbg(&nsblk->common.dev,
- "io error in %s sector %lld, len %d,\n",
- (rw == READ) ? "READ" : "WRITE",
- (unsigned long long)iter.bi_sector, len);
+ "io error in %s sector %lld, len %d\n",
+ rw == READ ? "READ" : "WRITE",
+ (u64)iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
diff --git a/drivers/nvdimm/btt.c b/drivers/nvdimm/btt.c
index 0df4461fe607..6c18d7bba6af 100644
--- a/drivers/nvdimm/btt.c
+++ b/drivers/nvdimm/btt.c
@@ -589,7 +589,8 @@ static int btt_freelist_init(struct arena_info *arena)
* to complete the map write. So fix up the map.
*/
ret = btt_map_write(arena, le32_to_cpu(log_new.lba),
- le32_to_cpu(log_new.new_map), 0, 0, 0);
+ le32_to_cpu(log_new.new_map),
+ 0, 0, 0);
if (ret)
return ret;
}
@@ -827,8 +828,9 @@ static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
arena->version_major = le16_to_cpu(super->version_major);
arena->version_minor = le16_to_cpu(super->version_minor);
- arena->nextoff = (super->nextoff == 0) ? 0 : (arena_off +
- le64_to_cpu(super->nextoff));
+ arena->nextoff = (super->nextoff == 0)
+ ? 0
+ : arena_off + le64_to_cpu(super->nextoff);
arena->infooff = arena_off;
arena->dataoff = arena_off + le64_to_cpu(super->dataoff);
arena->mapoff = arena_off + le64_to_cpu(super->mapoff);
@@ -836,8 +838,8 @@ static void parse_arena_meta(struct arena_info *arena, struct btt_sb *super,
arena->info2off = arena_off + le64_to_cpu(super->info2off);
arena->size = (le64_to_cpu(super->nextoff) > 0)
- ? (le64_to_cpu(super->nextoff))
- : (arena->info2off - arena->infooff + BTT_PG_SIZE);
+ ? le64_to_cpu(super->nextoff)
+ : arena->info2off - arena->infooff + BTT_PG_SIZE;
arena->flags = le32_to_cpu(super->flags);
}
@@ -1457,7 +1459,8 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
if (len > PAGE_SIZE || len < btt->sector_size ||
len % btt->sector_size) {
dev_err_ratelimited(&btt->nd_btt->dev,
- "unaligned bio segment (len: %d)\n", len);
+ "unaligned bio segment (len: %d)\n",
+ len);
bio->bi_status = BLK_STS_IOERR;
break;
}
@@ -1466,10 +1469,9 @@ static blk_qc_t btt_make_request(struct request_queue *q, struct bio *bio)
bio_op(bio), iter.bi_sector);
if (err) {
dev_err(&btt->nd_btt->dev,
- "io error in %s sector %lld, len %d,\n",
- (op_is_write(bio_op(bio))) ? "WRITE" :
- "READ",
- (unsigned long long)iter.bi_sector, len);
+ "io error in %s sector %lld, len %d\n",
+ op_is_write(bio_op(bio)) ? "WRITE" : "READ",
+ (u64)iter.bi_sector, len);
bio->bi_status = errno_to_blk_status(err);
break;
}
diff --git a/drivers/nvdimm/btt_devs.c b/drivers/nvdimm/btt_devs.c
index 9e0f17045e69..b27993ade004 100644
--- a/drivers/nvdimm/btt_devs.c
+++ b/drivers/nvdimm/btt_devs.c
@@ -45,8 +45,9 @@ struct nd_btt *to_nd_btt(struct device *dev)
}
EXPORT_SYMBOL(to_nd_btt);
-static const unsigned long btt_lbasize_supported[] = { 512, 520, 528,
- 4096, 4104, 4160, 4224, 0 };
+static const unsigned long btt_lbasize_supported[] = {
+ 512, 520, 528, 4096, 4104, 4160, 4224, 0
+};
static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -57,7 +58,8 @@ static ssize_t sector_size_show(struct device *dev,
}
static ssize_t sector_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -66,8 +68,8 @@ static ssize_t sector_size_store(struct device *dev,
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_btt->lbasize,
btt_lbasize_supported);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -85,37 +87,38 @@ static ssize_t uuid_show(struct device *dev,
return sprintf(buf, "\n");
}
-static ssize_t uuid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_btt->uuid, buf, len);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nd_device_unlock(dev);
return rc ? rc : len;
}
static DEVICE_ATTR_RW(uuid);
-static ssize_t namespace_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t namespace_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_btt->ndns
- ? dev_name(&nd_btt->ndns->dev) : "");
+ rc = sprintf(buf, "%s\n",
+ nd_btt->ndns ? dev_name(&nd_btt->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -123,8 +126,8 @@ static ssize_t namespace_store(struct device *dev,
nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_btt->ndns, buf, len);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -132,8 +135,8 @@ static ssize_t namespace_store(struct device *dev,
}
static DEVICE_ATTR_RW(namespace);
-static ssize_t size_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+static ssize_t size_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
{
struct nd_btt *nd_btt = to_nd_btt(dev);
ssize_t rc;
@@ -304,7 +307,8 @@ int nd_btt_version(struct nd_btt *nd_btt, struct nd_namespace_common *ndns,
EXPORT_SYMBOL(nd_btt_version);
static int __nd_btt_probe(struct nd_btt *nd_btt,
- struct nd_namespace_common *ndns, struct btt_sb *btt_sb)
+ struct nd_namespace_common *ndns,
+ struct btt_sb *btt_sb)
{
int rc;
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 620f07ac306c..733b2a2117c0 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -103,8 +103,8 @@ static int nvdimm_bus_probe(struct device *dev)
nd_region_disable(nvdimm_bus, dev);
nvdimm_bus_probe_end(nvdimm_bus);
- dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n", dev->driver->name,
- dev_name(dev), rc);
+ dev_dbg(&nvdimm_bus->dev, "END: %s.probe(%s) = %d\n",
+ dev->driver->name, dev_name(dev), rc);
if (rc != 0)
module_put(provider);
@@ -125,8 +125,8 @@ static int nvdimm_bus_remove(struct device *dev)
}
nd_region_disable(nvdimm_bus, dev);
- dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n", dev->driver->name,
- dev_name(dev), rc);
+ dev_dbg(&nvdimm_bus->dev, "%s.remove(%s) = %d\n",
+ dev->driver->name, dev_name(dev), rc);
module_put(provider);
return rc;
}
@@ -846,8 +846,9 @@ u32 nd_cmd_in_size(struct nvdimm *nvdimm, int cmd,
EXPORT_SYMBOL_GPL(nd_cmd_in_size);
u32 nd_cmd_out_size(struct nvdimm *nvdimm, int cmd,
- const struct nd_cmd_desc *desc, int idx, const u32 *in_field,
- const u32 *out_field, unsigned long remainder)
+ const struct nd_cmd_desc *desc, int idx,
+ const u32 *in_field, const u32 *out_field,
+ unsigned long remainder)
{
if (idx >= desc->out_num)
return UINT_MAX;
@@ -951,7 +952,8 @@ static int nd_ns_forget_poison_check(struct device *dev, void *data)
/* set_config requires an idle interleave set */
static int nd_cmd_clear_to_send(struct nvdimm_bus *nvdimm_bus,
- struct nvdimm *nvdimm, unsigned int cmd, void *data)
+ struct nvdimm *nvdimm,
+ unsigned int cmd, void *data)
{
struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
@@ -1025,7 +1027,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
case ND_CMD_ARS_START:
case ND_CMD_CLEAR_ERROR:
case ND_CMD_CALL:
- dev_dbg(dev, "'%s' command while read-only.\n",
+ dev_dbg(dev, "'%s' command while read-only\n",
nvdimm ? nvdimm_cmd_name(cmd)
: nvdimm_bus_cmd_name(cmd));
return -EPERM;
@@ -1061,8 +1063,7 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
if (cmd == ND_CMD_CALL) {
func = pkg.nd_command;
dev_dbg(dev, "%s, idx: %llu, in: %u, out: %u, len %llu\n",
- dimm_name, pkg.nd_command,
- in_len, out_len, buf_len);
+ dimm_name, pkg.nd_command, in_len, out_len, buf_len);
}
/* process an output envelope */
@@ -1097,8 +1098,8 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
buf_len = (u64)out_len + (u64)in_len;
if (buf_len > ND_IOCTL_MAX_BUFLEN) {
- dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n", dimm_name,
- cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
+ dev_dbg(dev, "%s cmd: %s buf_len: %llu > %d\n",
+ dimm_name, cmd_name, buf_len, ND_IOCTL_MAX_BUFLEN);
rc = -EINVAL;
goto out;
}
diff --git a/drivers/nvdimm/claim.c b/drivers/nvdimm/claim.c
index 244631f5308c..953029c240e5 100644
--- a/drivers/nvdimm/claim.c
+++ b/drivers/nvdimm/claim.c
@@ -132,8 +132,8 @@ static void nd_detach_and_reset(struct device *dev,
}
ssize_t nd_namespace_store(struct device *dev,
- struct nd_namespace_common **_ndns, const char *buf,
- size_t len)
+ struct nd_namespace_common **_ndns,
+ const char *buf, size_t len)
{
struct nd_namespace_common *ndns;
struct device *found;
@@ -158,7 +158,8 @@ ssize_t nd_namespace_store(struct device *dev,
if (strcmp(name, "") == 0) {
nd_detach_and_reset(dev, _ndns);
goto out;
- } else if (ndns) {
+ }
+ if (ndns) {
dev_dbg(dev, "namespace already set to: %s\n",
dev_name(&ndns->dev));
len = -EBUSY;
@@ -200,7 +201,6 @@ ssize_t nd_namespace_store(struct device *dev,
default:
len = -EBUSY;
goto out_attach;
- break;
}
if (__nvdimm_namespace_capacity(ndns) < SZ_16M) {
@@ -278,7 +278,8 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
might_sleep();
cleared = nvdimm_clear_poison(&ndns->dev,
- nsio->res.start + offset, size);
+ nsio->res.start + offset,
+ size);
if (cleared < size)
rc = -EIO;
if (cleared > 0 && cleared / 512) {
diff --git a/drivers/nvdimm/core.c b/drivers/nvdimm/core.c
index e30b39f49c46..deb92c806abf 100644
--- a/drivers/nvdimm/core.c
+++ b/drivers/nvdimm/core.c
@@ -208,9 +208,7 @@ EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);
static bool is_uuid_sep(char sep)
{
- if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
- return true;
- return false;
+ return sep == '\n' || sep == '-' || sep == ':' || sep == '\0';
}
static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
diff --git a/drivers/nvdimm/dimm_devs.c b/drivers/nvdimm/dimm_devs.c
index cac62bb726bb..35a6c20d30fd 100644
--- a/drivers/nvdimm/dimm_devs.c
+++ b/drivers/nvdimm/dimm_devs.c
@@ -114,7 +114,8 @@ int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
cmd_size = sizeof(*cmd) + cmd->in_length;
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ ND_CMD_GET_CONFIG_DATA, cmd, cmd_size,
+ &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
@@ -162,7 +163,8 @@ int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);
rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
- ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
+ ND_CMD_SET_CONFIG_DATA, cmd, cmd_size,
+ &cmd_rc);
if (rc < 0)
break;
if (cmd_rc < 0) {
@@ -341,8 +343,8 @@ static ssize_t state_show(struct device *dev, struct device_attribute *attr,
*/
nvdimm_bus_lock(dev);
nvdimm_bus_unlock(dev);
- return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
- ? "active" : "idle");
+ return sprintf(buf, "%s\n",
+ atomic_read(&nvdimm->busy) ? "active" : "idle");
}
static DEVICE_ATTR_RO(state);
@@ -397,7 +399,8 @@ static ssize_t frozen_show(struct device *dev,
static DEVICE_ATTR_RO(frozen);
static ssize_t security_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
ssize_t rc;
@@ -551,7 +554,7 @@ int nvdimm_security_freeze(struct nvdimm *nvdimm)
return -EIO;
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
+ dev_warn(&nvdimm->dev, "Overwrite operation in progress\n");
return -EBUSY;
}
@@ -711,7 +714,8 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
* the set can be established.
*/
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, resource_size_t *overlap)
+ struct nd_mapping *nd_mapping,
+ resource_size_t *overlap)
{
resource_size_t map_start, map_end, busy = 0, available, blk_start;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index ae466c6faa90..9bf75dad8e93 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -159,7 +159,8 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
seq = __le32_to_cpu(nsindex[i]->seq);
if ((seq & NSINDEX_SEQ_MASK) == 0) {
- dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
+ dev_dbg(dev, "nsindex%d sequence: %#x invalid\n",
+ i, seq);
continue;
}
@@ -167,29 +168,27 @@ static int __nd_label_validate(struct nvdimm_drvdata *ndd)
if (__le64_to_cpu(nsindex[i]->myoff)
!= i * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->myoff));
+ i, (u64)__le64_to_cpu(nsindex[i]->myoff));
continue;
}
if (__le64_to_cpu(nsindex[i]->otheroff)
!= (!i) * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->otheroff));
+ i, (u64)__le64_to_cpu(nsindex[i]->otheroff));
continue;
}
if (__le64_to_cpu(nsindex[i]->labeloff)
!= 2 * sizeof_namespace_index(ndd)) {
dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
- i, (unsigned long long)
- __le64_to_cpu(nsindex[i]->labeloff));
+ i, (u64)__le64_to_cpu(nsindex[i]->labeloff));
continue;
}
size = __le64_to_cpu(nsindex[i]->mysize);
if (size > sizeof_namespace_index(ndd) ||
size < sizeof(struct nd_namespace_index)) {
- dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
+ dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n",
+ i, size);
continue;
}
@@ -717,13 +716,13 @@ enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
if (guid_equal(guid, &nvdimm_btt_guid))
return NVDIMM_CCLASS_BTT;
- else if (guid_equal(guid, &nvdimm_btt2_guid))
+ if (guid_equal(guid, &nvdimm_btt2_guid))
return NVDIMM_CCLASS_BTT2;
- else if (guid_equal(guid, &nvdimm_pfn_guid))
+ if (guid_equal(guid, &nvdimm_pfn_guid))
return NVDIMM_CCLASS_PFN;
- else if (guid_equal(guid, &nvdimm_dax_guid))
+ if (guid_equal(guid, &nvdimm_dax_guid))
return NVDIMM_CCLASS_DAX;
- else if (guid_equal(guid, &guid_null))
+ if (guid_equal(guid, &guid_null))
return NVDIMM_CCLASS_NONE;
return NVDIMM_CCLASS_UNKNOWN;
@@ -763,7 +762,8 @@ static void reap_victim(struct nd_mapping *nd_mapping,
}
static int __pmem_label_update(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
+ struct nd_mapping *nd_mapping,
+ struct nd_namespace_pmem *nspm,
int pos, unsigned long flags)
{
struct nd_namespace_common *ndns = &nspm->nsio.common;
@@ -901,7 +901,8 @@ static struct resource *to_resource(struct nvdimm_drvdata *ndd,
* 3/ Record the resources in the namespace device
*/
static int __blk_label_update(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
+ struct nd_mapping *nd_mapping,
+ struct nd_namespace_blk *nsblk,
int num_labels)
{
int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
@@ -1245,7 +1246,8 @@ static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
}
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size)
+ struct nd_namespace_pmem *nspm,
+ resource_size_t size)
{
int i, rc;
@@ -1293,7 +1295,8 @@ int nd_pmem_namespace_label_update(struct nd_region *nd_region,
}
int nd_blk_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_blk *nsblk, resource_size_t size)
+ struct nd_namespace_blk *nsblk,
+ resource_size_t size)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct resource *res;
diff --git a/drivers/nvdimm/label.h b/drivers/nvdimm/label.h
index aff33d09fec3..a008ec92f78c 100644
--- a/drivers/nvdimm/label.h
+++ b/drivers/nvdimm/label.h
@@ -140,7 +140,9 @@ struct nd_region;
struct nd_namespace_pmem;
struct nd_namespace_blk;
int nd_pmem_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size);
+ struct nd_namespace_pmem *nspm,
+ resource_size_t size);
int nd_blk_namespace_label_update(struct nd_region *nd_region,
- struct nd_namespace_blk *nsblk, resource_size_t size);
+ struct nd_namespace_blk *nsblk,
+ resource_size_t size);
#endif /* __LABEL_H__ */
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 253f07d97b73..d53efe06d312 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -198,17 +198,17 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
}
if (nsidx)
- sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
- suffix ? suffix : "");
+ sprintf(name, "pmem%d.%d%s",
+ nd_region->id, nsidx, suffix ? suffix : "");
else
- sprintf(name, "pmem%d%s", nd_region->id,
- suffix ? suffix : "");
+ sprintf(name, "pmem%d%s",
+ nd_region->id, suffix ? suffix : "");
} else if (is_namespace_blk(&ndns->dev)) {
struct nd_namespace_blk *nsblk;
nsblk = to_nd_namespace_blk(&ndns->dev);
- sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
- suffix ? suffix : "");
+ sprintf(name, "ndblk%d.%d%s",
+ nd_region->id, nsblk->id, suffix ? suffix : "");
} else {
return NULL;
}
@@ -408,8 +408,8 @@ static int nd_namespace_label_update(struct nd_region *nd_region,
return -ENXIO;
}
-static ssize_t alt_name_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t alt_name_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
@@ -448,9 +448,8 @@ static ssize_t alt_name_show(struct device *dev,
}
static DEVICE_ATTR_RW(alt_name);
-static int scan_free(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
- resource_size_t n)
+static int scan_free(struct nd_region *nd_region, struct nd_mapping *nd_mapping,
+ struct nd_label_id *label_id, resource_size_t n)
{
bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -507,7 +506,8 @@ static int scan_free(struct nd_region *nd_region,
* set.
*/
static int shrink_dpa_allocation(struct nd_region *nd_region,
- struct nd_label_id *label_id, resource_size_t n)
+ struct nd_label_id *label_id,
+ resource_size_t n)
{
int i;
@@ -524,7 +524,8 @@ static int shrink_dpa_allocation(struct nd_region *nd_region,
}
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
- struct nd_region *nd_region, struct nd_mapping *nd_mapping,
+ struct nd_region *nd_region,
+ struct nd_mapping *nd_mapping,
resource_size_t n)
{
bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
@@ -616,7 +617,8 @@ enum alloc_loc {
};
static resource_size_t scan_allocate(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
+ struct nd_mapping *nd_mapping,
+ struct nd_label_id *label_id,
resource_size_t n)
{
resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
@@ -626,9 +628,10 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
const resource_size_t to_allocate = n;
int first;
- for_each_dpa_resource(ndd, res)
+ for_each_dpa_resource(ndd, res) {
if (strcmp(label_id->id, res->name) == 0)
exist = res;
+ }
valid.start = nd_mapping->start;
valid.end = mapping_end;
@@ -698,8 +701,9 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
case ALLOC_MID:
if (strcmp(next->name, label_id->id) == 0) {
/* adjust next resource up */
- rc = adjust_resource(next, next->start
- - allocate, resource_size(next)
+ rc = adjust_resource(next,
+ next->start - allocate,
+ resource_size(next)
+ allocate);
new_res = next;
action = "next grow up";
@@ -730,8 +734,8 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
rc = -EBUSY;
} else if (strcmp(action, "grow down") == 0) {
/* adjust current resource down */
- rc = adjust_resource(res, res->start, resource_size(res)
- + allocate);
+ rc = adjust_resource(res, res->start,
+ resource_size(res) + allocate);
if (rc == 0)
res->flags |= DPA_RESOURCE_ADJUSTED;
}
@@ -771,7 +775,8 @@ static resource_size_t scan_allocate(struct nd_region *nd_region,
}
static int merge_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
+ struct nd_mapping *nd_mapping,
+ struct nd_label_id *label_id)
{
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
struct resource *res;
@@ -831,8 +836,7 @@ int __reserve_free_pmem(struct device *dev, void *data)
rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
dev_WARN_ONCE(&nd_region->dev, rem,
"pmem reserve underrun: %#llx of %#llx bytes\n",
- (unsigned long long)n - rem,
- (unsigned long long)n);
+ (u64)n - rem, (u64)n);
return rem ? -ENXIO : 0;
}
@@ -912,8 +916,7 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
dev_WARN_ONCE(&nd_region->dev, rem,
"allocation underrun: %#llx of %#llx bytes\n",
- (unsigned long long)n - rem,
- (unsigned long long)n);
+ (u64)n - rem, (u64)n);
if (rem)
return -ENXIO;
@@ -926,7 +929,8 @@ static int grow_dpa_allocation(struct nd_region *nd_region,
}
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
- struct nd_namespace_pmem *nspm, resource_size_t size)
+ struct nd_namespace_pmem *nspm,
+ resource_size_t size)
{
struct resource *res = &nspm->nsio.res;
resource_size_t offset = 0;
@@ -1073,8 +1077,8 @@ static ssize_t __size_store(struct device *dev, unsigned long long val)
return rc;
}
-static ssize_t size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t size_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
unsigned long long val;
@@ -1174,8 +1178,8 @@ EXPORT_SYMBOL(nvdimm_namespace_locked);
static ssize_t size_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
- return sprintf(buf, "%llu\n", (unsigned long long)
- nvdimm_namespace_capacity(to_ndns(dev)));
+ return sprintf(buf, "%llu\n",
+ (u64)nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);
@@ -1215,7 +1219,8 @@ static ssize_t uuid_show(struct device *dev,
* @old_uuid: reference to the uuid storage location in the namespace object
*/
static int namespace_update_uuid(struct nd_region *nd_region,
- struct device *dev, u8 *new_uuid, u8 **old_uuid)
+ struct device *dev,
+ u8 *new_uuid, u8 **old_uuid)
{
u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
struct nd_label_id old_label_id;
@@ -1281,8 +1286,8 @@ static int namespace_update_uuid(struct nd_region *nd_region,
return 0;
}
-static ssize_t uuid_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t uuid_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
u8 *uuid = NULL;
@@ -1314,8 +1319,8 @@ static ssize_t uuid_store(struct device *dev,
rc = nd_namespace_label_update(nd_region, dev);
else
kfree(uuid);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1343,14 +1348,17 @@ static ssize_t resource_show(struct device *dev,
/* no address to convey if the namespace has no allocation */
if (resource_size(res) == 0)
return -ENXIO;
- return sprintf(buf, "%#llx\n", (unsigned long long)res->start);
+ return sprintf(buf, "%#llx\n", (u64)res->start);
}
static DEVICE_ATTR_RO(resource);
-static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
- 4096, 4104, 4160, 4224, 0 };
+static const unsigned long blk_lbasize_supported[] = {
+ 512, 520, 528, 4096, 4104, 4160, 4224, 0
+};
-static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };
+static const unsigned long pmem_lbasize_supported[] = {
+ 512, 4096, 0
+};
static ssize_t sector_size_show(struct device *dev,
struct device_attribute *attr, char *buf)
@@ -1372,7 +1380,8 @@ static ssize_t sector_size_show(struct device *dev,
}
static ssize_t sector_size_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
const unsigned long *supported;
@@ -1401,7 +1410,8 @@ static ssize_t sector_size_store(struct device *dev,
rc = nd_size_select_store(dev, buf, lbasize, supported);
if (rc >= 0)
rc = nd_namespace_label_update(nd_region, dev);
- dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
+ dev_dbg(dev, "result: %zd %s: %s%s",
+ rc, rc < 0 ? "tried" : "wrote",
buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -1553,7 +1563,8 @@ static ssize_t __holder_class_store(struct device *dev, const char *buf)
}
static ssize_t holder_class_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_region *nd_region = to_nd_region(dev->parent);
ssize_t rc;
@@ -1623,7 +1634,8 @@ static ssize_t mode_show(struct device *dev,
static DEVICE_ATTR_RO(mode);
static ssize_t force_raw_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
bool force_raw;
int rc = strtobool(buf, &force_raw);
@@ -2190,8 +2202,8 @@ void nd_region_create_btt_seed(struct nd_region *nd_region)
}
static int add_namespace_resource(struct nd_region *nd_region,
- struct nd_namespace_label *nd_label, struct device **devs,
- int count)
+ struct nd_namespace_label *nd_label,
+ struct device **devs, int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
@@ -2228,7 +2240,8 @@ static int add_namespace_resource(struct nd_region *nd_region,
}
static struct device *create_namespace_blk(struct nd_region *nd_region,
- struct nd_namespace_label *nd_label, int count)
+ struct nd_namespace_label *nd_label,
+ int count)
{
struct nd_mapping *nd_mapping = &nd_region->mapping[0];
struct nd_interleave_set *nd_set = nd_region->nd_set;
@@ -2333,7 +2346,8 @@ static struct device **scan_labels(struct nd_region *nd_region)
continue;
/* skip labels that describe extents outside of the region */
- if (nd_label->dpa < nd_mapping->start || nd_label->dpa > map_end)
+ if (nd_label->dpa < nd_mapping->start ||
+ nd_label->dpa > map_end)
continue;
i = add_namespace_resource(nd_region, nd_label, devs, count);
@@ -2494,7 +2508,8 @@ static int init_active_labels(struct nd_region *nd_region)
test_bit(NDD_ALIASING, &nvdimm->flags)) {
/* labels needed to disambiguate dpa */
- dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
+ dev_err(&nd_region->dev,
+ "%s: is %s, failing probe\n",
dev_name(&nd_mapping->nvdimm->dev),
test_bit(NDD_LOCKED, &nvdimm->flags)
? "locked" : "disabled");
diff --git a/drivers/nvdimm/nd-core.h b/drivers/nvdimm/nd-core.h
index 3b48fba4629b..15bbdf6bea24 100644
--- a/drivers/nvdimm/nd-core.h
+++ b/drivers/nvdimm/nd-core.h
@@ -62,8 +62,7 @@ static inline unsigned long nvdimm_security_flags(
flags = nvdimm->sec.ops->get_flags(nvdimm, ptype);
/* disabled, locked, unlocked, and overwrite are mutually exclusive */
dev_WARN_ONCE(&nvdimm->dev, hweight64(flags & state_flags) > 1,
- "reported invalid security state: %#llx\n",
- (unsigned long long)flags);
+ "reported invalid security state: %#llx\n", (u64)flags);
return flags;
}
@@ -150,7 +149,8 @@ resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
struct nd_mapping *nd_mapping);
resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region);
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
- struct nd_mapping *nd_mapping, resource_size_t *overlap);
+ struct nd_mapping *nd_mapping,
+ resource_size_t *overlap);
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region);
resource_size_t nd_region_available_dpa(struct nd_region *nd_region);
int nd_region_conflict(struct nd_region *nd_region, resource_size_t start,
@@ -159,7 +159,8 @@ resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
struct nd_label_id *label_id);
int alias_dpa_busy(struct device *dev, void *data);
struct resource *nsblk_add_resource(struct nd_region *nd_region,
- struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
+ struct nvdimm_drvdata *ndd,
+ struct nd_namespace_blk *nsblk,
resource_size_t start);
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd);
void get_ndd(struct nvdimm_drvdata *ndd);
@@ -171,8 +172,8 @@ bool nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
bool __nd_attach_ndns(struct device *dev, struct nd_namespace_common *attach,
struct nd_namespace_common **_ndns);
ssize_t nd_namespace_store(struct device *dev,
- struct nd_namespace_common **_ndns, const char *buf,
- size_t len);
+ struct nd_namespace_common **_ndns,
+ const char *buf, size_t len);
struct nd_pfn *to_nd_pfn_safe(struct device *dev);
bool is_nvdimm_bus(struct device *dev);
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index d434041ca2e5..852ce9591109 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -90,8 +90,8 @@ unsigned int sizeof_namespace_label(struct nvdimm_drvdata *ndd);
#define nd_dbg_dpa(r, d, res, fmt, arg...) \
dev_dbg((r) ? &(r)->dev : (d)->dev, "%s: %.13s: %#llx @ %#llx " fmt, \
(r) ? dev_name((d)->dev) : "", res ? res->name : "null", \
- (unsigned long long)(res ? resource_size(res) : 0), \
- (unsigned long long)(res ? res->start : 0), ##arg)
+ (u64)(res ? resource_size(res) : 0), \
+ (u64)(res ? res->start : 0), ##arg)
#define for_each_dpa_resource(ndd, res) \
for (res = (ndd)->dpa.child; res; res = res->sibling)
@@ -228,7 +228,8 @@ int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
ssize_t nd_size_select_show(unsigned long current_size,
const unsigned long *supported, char *buf);
ssize_t nd_size_select_store(struct device *dev, const char *buf,
- unsigned long *current_size, const unsigned long *supported);
+ unsigned long *current_size,
+ const unsigned long *supported);
int __init nvdimm_init(void);
int __init nd_region_init(void);
int __init nd_label_init(void);
@@ -363,7 +364,8 @@ void put_ndd(struct nvdimm_drvdata *ndd);
int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd);
void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res);
struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
- struct nd_label_id *label_id, resource_size_t start,
+ struct nd_label_id *label_id,
+ resource_size_t start,
resource_size_t n);
resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns);
bool nvdimm_namespace_locked(struct nd_namespace_common *ndns);
@@ -374,7 +376,8 @@ const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
char *name);
unsigned int pmem_sector_size(struct nd_namespace_common *ndns);
void nvdimm_badblocks_populate(struct nd_region *nd_region,
- struct badblocks *bb, const struct resource *res);
+ struct badblocks *bb,
+ const struct resource *res);
#if IS_ENABLED(CONFIG_ND_CLAIM)
int nvdimm_setup_pfn(struct nd_pfn *nd_pfn, struct dev_pagemap *pgmap);
int devm_nsio_enable(struct device *dev, struct nd_namespace_io *nsio);
@@ -423,15 +426,14 @@ static inline void nd_iostat_end(struct bio *bio, unsigned long start)
static inline bool is_bad_pmem(struct badblocks *bb, sector_t sector,
unsigned int len)
{
- if (bb->count) {
- sector_t first_bad;
- int num_bad;
+ sector_t first_bad;
+ int num_bad;
- return !!badblocks_check(bb, sector, len / 512, &first_bad,
- &num_bad);
- }
+ if (!bb->count)
+ return false;
- return false;
+
+ return badblocks_check(bb, sector, len / 512, &first_bad, &num_bad);
}
resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk);
diff --git a/drivers/nvdimm/nd_virtio.c b/drivers/nvdimm/nd_virtio.c
index f09541bf3d5d..1a792fee8cfd 100644
--- a/drivers/nvdimm/nd_virtio.c
+++ b/drivers/nvdimm/nd_virtio.c
@@ -24,7 +24,8 @@ void virtio_pmem_host_ack(struct virtqueue *vq)
if (!list_empty(&vpmem->req_list)) {
req_buf = list_first_entry(&vpmem->req_list,
- struct virtio_pmem_request, list);
+ struct virtio_pmem_request,
+ list);
req_buf->wq_buf_avail = true;
wake_up(&req_buf->wq_buf);
list_del(&req_buf->list);
diff --git a/drivers/nvdimm/pfn_devs.c b/drivers/nvdimm/pfn_devs.c
index 7226d6d95899..6ab72f8f4a66 100644
--- a/drivers/nvdimm/pfn_devs.c
+++ b/drivers/nvdimm/pfn_devs.c
@@ -61,8 +61,8 @@ static ssize_t mode_show(struct device *dev,
}
}
-static ssize_t mode_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc = 0;
@@ -87,8 +87,8 @@ static ssize_t mode_store(struct device *dev,
rc = -EINVAL;
}
}
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -137,8 +137,8 @@ static ssize_t align_store(struct device *dev,
nvdimm_bus_lock(dev);
rc = nd_size_select_store(dev, buf, &nd_pfn->align,
nd_pfn_supported_alignments());
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -164,8 +164,8 @@ static ssize_t uuid_store(struct device *dev,
nd_device_lock(dev);
rc = nd_uuid_store(dev, &nd_pfn->uuid, buf, len);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nd_device_unlock(dev);
return rc ? rc : len;
@@ -179,14 +179,15 @@ static ssize_t namespace_show(struct device *dev,
ssize_t rc;
nvdimm_bus_lock(dev);
- rc = sprintf(buf, "%s\n", nd_pfn->ndns
- ? dev_name(&nd_pfn->ndns->dev) : "");
+ rc = sprintf(buf, "%s\n",
+ nd_pfn->ndns ? dev_name(&nd_pfn->ndns->dev) : "");
nvdimm_bus_unlock(dev);
return rc;
}
static ssize_t namespace_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
struct nd_pfn *nd_pfn = to_nd_pfn_safe(dev);
ssize_t rc;
@@ -194,8 +195,8 @@ static ssize_t namespace_store(struct device *dev,
nd_device_lock(dev);
nvdimm_bus_lock(dev);
rc = nd_namespace_store(dev, &nd_pfn->ndns, buf, len);
- dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
- buf[len - 1] == '\n' ? "" : "\n");
+ dev_dbg(dev, "result: %zd wrote: %s%s",
+ rc, buf, buf[len - 1] == '\n' ? "" : "\n");
nvdimm_bus_unlock(dev);
nd_device_unlock(dev);
@@ -217,8 +218,8 @@ static ssize_t resource_show(struct device *dev,
u32 start_pad = __le32_to_cpu(pfn_sb->start_pad);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- rc = sprintf(buf, "%#llx\n", (unsigned long long)nsio->res.start
- + start_pad + offset);
+ rc = sprintf(buf, "%#llx\n",
+ (u64)nsio->res.start + start_pad + offset);
} else {
/* no address to convey if the pfn instance is disabled */
rc = -ENXIO;
@@ -244,9 +245,9 @@ static ssize_t size_show(struct device *dev,
u32 end_trunc = __le32_to_cpu(pfn_sb->end_trunc);
struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
- rc = sprintf(buf, "%llu\n", (unsigned long long)
- resource_size(&nsio->res) - start_pad
- - end_trunc - offset);
+ rc = sprintf(buf, "%llu\n",
+ (u64)resource_size(&nsio->res)
+ - start_pad - end_trunc - offset);
} else {
/* no size to convey if the pfn instance is disabled */
rc = -ENXIO;
@@ -258,7 +259,8 @@ static ssize_t size_show(struct device *dev,
static DEVICE_ATTR_RO(size);
static ssize_t supported_alignments_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
return nd_size_select_show(0, nd_pfn_supported_alignments(), buf);
}
@@ -387,7 +389,8 @@ static int nd_pfn_clear_memmap_errors(struct nd_pfn *nd_pfn)
dev_dbg(&nd_pfn->dev, "meta: %x badblocks at %llx\n",
num_bad, first_bad);
nsoff = ALIGN_DOWN((nd_region->ndr_start
- + (first_bad << 9)) - nsio->res.start,
+ + (first_bad << 9))
+ - nsio->res.start,
PAGE_SIZE);
zero_len = ALIGN(num_bad << 9, PAGE_SIZE);
while (zero_len) {
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 64e7429edcc2..3f1add94144a 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -85,8 +85,7 @@ static blk_status_t pmem_clear_poison(struct pmem_device *pmem,
hwpoison_clear(pmem, pmem->phys_addr + offset, cleared);
cleared /= 512;
dev_dbg(dev, "%#llx clear %ld sector%s\n",
- (unsigned long long)sector, cleared,
- cleared > 1 ? "s" : "");
+ (u64)sector, cleared, cleared > 1 ? "s" : "");
badblocks_clear(&pmem->bb, sector, cleared);
if (pmem->bb_state)
sysfs_notify_dirent(pmem->bb_state);
@@ -138,8 +137,8 @@ static blk_status_t read_pmem(struct page *page, unsigned int off,
}
static blk_status_t pmem_do_bvec(struct pmem_device *pmem, struct page *page,
- unsigned int len, unsigned int off, unsigned int op,
- sector_t sector)
+ unsigned int len, unsigned int off,
+ unsigned int op, sector_t sector)
{
blk_status_t rc = BLK_STS_OK;
bool bad_pmem = false;
@@ -270,7 +269,8 @@ static const struct block_device_operations pmem_fops = {
};
static long pmem_dax_direct_access(struct dax_device *dax_dev,
- pgoff_t pgoff, long nr_pages, void **kaddr, pfn_t *pfn)
+ pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn)
{
struct pmem_device *pmem = dax_get_private(dax_dev);
@@ -455,8 +455,8 @@ static int pmem_attach_disk(struct device *dev,
disk->flags = GENHD_FL_EXT_DEVT;
disk->queue->backing_dev_info->capabilities |= BDI_CAP_SYNCHRONOUS_IO;
nvdimm_namespace_disk_name(ndns, disk->disk_name);
- set_capacity(disk, (pmem->size - pmem->pfn_pad - pmem->data_offset)
- / 512);
+ set_capacity(disk,
+ (pmem->size - pmem->pfn_pad - pmem->data_offset) / 512);
if (devm_init_badblocks(dev, &pmem->bb))
return -ENOMEM;
nvdimm_badblocks_populate(nd_region, &pmem->bb, &bb_res);
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index 6861e0997d21..6ed918e30cf9 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -2,6 +2,7 @@
/*
* Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
*/
+
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
@@ -44,16 +45,15 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
}
if (j < i)
- flush_page = (void __iomem *)((unsigned long)
- ndrd_get_flush_wpq(ndrd, dimm, j)
- & PAGE_MASK);
+ flush_page = (void __iomem *)
+ ((unsigned long)ndrd_get_flush_wpq(ndrd, dimm, j) & PAGE_MASK);
else
- flush_page = devm_nvdimm_ioremap(dev,
- PFN_PHYS(pfn), PAGE_SIZE);
+ flush_page = devm_nvdimm_ioremap(dev, PFN_PHYS(pfn),
+ PAGE_SIZE);
if (!flush_page)
return -ENXIO;
- ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
- + (res->start & ~PAGE_MASK));
+ ndrd_set_flush_wpq(ndrd, dimm, i,
+ flush_page + (res->start & ~PAGE_MASK));
}
return 0;
@@ -276,7 +276,8 @@ static ssize_t deep_flush_show(struct device *dev,
return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}
-static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
+static ssize_t deep_flush_store(struct device *dev,
+ struct device_attribute *attr,
const char *buf, size_t len)
{
bool flush;
@@ -435,7 +436,8 @@ static ssize_t available_size_show(struct device *dev,
static DEVICE_ATTR_RO(available_size);
static ssize_t max_available_extent_show(struct device *dev,
- struct device_attribute *attr, char *buf)
+ struct device_attribute *attr,
+ char *buf)
{
struct nd_region *nd_region = to_nd_region(dev);
unsigned long long available = 0;
@@ -544,7 +546,8 @@ static ssize_t read_only_show(struct device *dev,
}
static ssize_t read_only_store(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t len)
+ struct device_attribute *attr,
+ const char *buf, size_t len)
{
bool ro;
int rc = strtobool(buf, &ro);
@@ -813,12 +816,13 @@ static ssize_t mappingN(struct device *dev, char *buf, int n)
}
#define REGION_MAPPING(idx) \
- static ssize_t mapping##idx##_show(struct device *dev, \
- struct device_attribute *attr, char *buf) \
- { \
- return mappingN(dev, buf, idx); \
- } \
- static DEVICE_ATTR_RO(mapping##idx)
+static ssize_t mapping##idx##_show(struct device *dev, \
+ struct device_attribute *attr, \
+ char *buf) \
+{ \
+ return mappingN(dev, buf, idx); \
+} \
+static DEVICE_ATTR_RO(mapping##idx)
/*
* 32 should be enough for a while, even in the presence of socket
diff --git a/drivers/nvdimm/security.c b/drivers/nvdimm/security.c
index 693416001d17..cb14c05f127e 100644
--- a/drivers/nvdimm/security.c
+++ b/drivers/nvdimm/security.c
@@ -118,7 +118,8 @@ static struct key *nvdimm_lookup_user_key(struct nvdimm *nvdimm,
}
static const void *nvdimm_get_user_key_payload(struct nvdimm *nvdimm,
- key_serial_t id, int subclass, struct key **key)
+ key_serial_t id, int subclass,
+ struct key **key)
{
*key = NULL;
if (id == 0) {
@@ -229,7 +230,7 @@ static int check_security_state(struct nvdimm *nvdimm)
}
if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
- dev_dbg(dev, "Security operation in progress.\n");
+ dev_dbg(dev, "Security operation in progress\n");
return -EBUSY;
}
@@ -262,8 +263,8 @@ static int security_disable(struct nvdimm *nvdimm, unsigned int keyid)
return -ENOKEY;
rc = nvdimm->sec.ops->disable(nvdimm, data);
- dev_dbg(dev, "key: %d disable: %s\n", key_serial(key),
- rc == 0 ? "success" : "fail");
+ dev_dbg(dev, "key: %d disable: %s\n",
+ key_serial(key), rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
@@ -316,8 +317,7 @@ static int security_update(struct nvdimm *nvdimm, unsigned int keyid,
nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm,
NVDIMM_MASTER);
else
- nvdimm->sec.flags = nvdimm_security_flags(nvdimm,
- NVDIMM_USER);
+ nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
return rc;
}
@@ -344,8 +344,7 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
if (!test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.ext_flags) &&
pass_type == NVDIMM_MASTER) {
- dev_dbg(dev,
- "Attempt to secure erase in wrong master state.\n");
+ dev_dbg(dev, "Attempt to secure erase in wrong master state\n");
return -EOPNOTSUPP;
}
@@ -355,7 +354,8 @@ static int security_erase(struct nvdimm *nvdimm, unsigned int keyid,
return -ENOKEY;
rc = nvdimm->sec.ops->erase(nvdimm, data, pass_type);
- dev_dbg(dev, "key: %d erase%s: %s\n", key_serial(key),
+ dev_dbg(dev, "key: %d erase%s: %s\n",
+ key_serial(key),
pass_type == NVDIMM_MASTER ? "(master)" : "(user)",
rc == 0 ? "success" : "fail");
@@ -381,7 +381,7 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -EOPNOTSUPP;
if (dev->driver == NULL) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
+ dev_dbg(dev, "Unable to overwrite while DIMM active\n");
return -EINVAL;
}
@@ -395,8 +395,8 @@ static int security_overwrite(struct nvdimm *nvdimm, unsigned int keyid)
return -ENOKEY;
rc = nvdimm->sec.ops->overwrite(nvdimm, data);
- dev_dbg(dev, "key: %d overwrite submission: %s\n", key_serial(key),
- rc == 0 ? "success" : "fail");
+ dev_dbg(dev, "key: %d overwrite submission: %s\n",
+ key_serial(key), rc == 0 ? "success" : "fail");
nvdimm_put_key(key);
if (rc == 0) {
@@ -533,7 +533,7 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
} else if (i == OP_ERASE || i == OP_MASTER_ERASE) {
dev_dbg(dev, "%s %u\n", ops[i].name, key);
if (atomic_read(&nvdimm->busy)) {
- dev_dbg(dev, "Unable to secure erase while DIMM active.\n");
+ dev_dbg(dev, "Unable to secure erase while DIMM active\n");
return -EBUSY;
}
rc = security_erase(nvdimm, key, i == OP_ERASE
@@ -541,7 +541,7 @@ ssize_t nvdimm_security_store(struct device *dev, const char *buf, size_t len)
} else if (i == OP_OVERWRITE) {
dev_dbg(dev, "overwrite %u\n", key);
if (atomic_read(&nvdimm->busy)) {
- dev_dbg(dev, "Unable to overwrite while DIMM active.\n");
+ dev_dbg(dev, "Unable to overwrite while DIMM active\n");
return -EBUSY;
}
rc = security_overwrite(nvdimm, key);
diff --git a/drivers/nvdimm/virtio_pmem.c b/drivers/nvdimm/virtio_pmem.c
index ce2181e06756..087753ac81a0 100644
--- a/drivers/nvdimm/virtio_pmem.c
+++ b/drivers/nvdimm/virtio_pmem.c
@@ -19,7 +19,8 @@ static int init_vq(struct virtio_pmem *vpmem)
{
/* single vq */
vpmem->req_vq = virtio_find_single_vq(vpmem->vdev,
- virtio_pmem_host_ack, "flush_queue");
+ virtio_pmem_host_ack,
+ "flush_queue");
if (IS_ERR(vpmem->req_vq))
return PTR_ERR(vpmem->req_vq);
@@ -91,6 +92,7 @@ static int virtio_pmem_probe(struct virtio_device *vdev)
}
nd_region->provider_data = dev_to_virtio(nd_region->dev.parent->parent);
return 0;
+
out_nd:
nvdimm_bus_unregister(vpmem->nvdimm_bus);
out_vq:
--
2.15.0
Typing is hard.
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/namespace_devs.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/nvdimm/namespace_devs.c b/drivers/nvdimm/namespace_devs.c
index 7a16340f9853..253f07d97b73 100644
--- a/drivers/nvdimm/namespace_devs.c
+++ b/drivers/nvdimm/namespace_devs.c
@@ -1718,7 +1718,7 @@ struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
return ERR_PTR(-ENODEV);
/*
- * Flush any in-progess probes / removals in the driver
+ * Flush any in-progress probes / removals in the driver
* for the raw personality of this namespace.
*/
nd_device_lock(&ndns->dev);
--
2.15.0
Use the more common kernel type.
Signed-off-by: Joe Perches <[email protected]>
---
drivers/nvdimm/label.c | 2 +-
drivers/nvdimm/nd.h | 6 +++---
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/drivers/nvdimm/label.c b/drivers/nvdimm/label.c
index 2c780c5352dc..5700d9b35b8f 100644
--- a/drivers/nvdimm/label.c
+++ b/drivers/nvdimm/label.c
@@ -34,7 +34,7 @@ static u32 best_seq(u32 a, u32 b)
return a;
}
-unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
+unsigned int sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
return ndd->nslabel_size;
}
diff --git a/drivers/nvdimm/nd.h b/drivers/nvdimm/nd.h
index c10a4b94d44a..1636061b1f93 100644
--- a/drivers/nvdimm/nd.h
+++ b/drivers/nvdimm/nd.h
@@ -81,7 +81,7 @@ static inline struct nd_namespace_index *to_next_namespace_index(
return to_namespace_index(ndd, ndd->ns_next);
}
-unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd);
+unsigned int sizeof_namespace_label(struct nvdimm_drvdata *ndd);
#define namespace_label_has(ndd, field) \
(offsetof(struct nd_namespace_label, field) \
@@ -170,9 +170,9 @@ struct nd_blk_region {
/*
* Lookup next in the repeating sequence of 01, 10, and 11.
*/
-static inline unsigned nd_inc_seq(unsigned seq)
+static inline unsigned int nd_inc_seq(unsigned int seq)
{
- static const unsigned next[] = { 0, 2, 3, 1 };
+ static const unsigned int next[] = { 0, 2, 3, 1 };
return next[seq & 3];
}
--
2.15.0
On Wed, 2019-09-11 at 19:54 -0700, Joe Perches wrote:
> Avoid using uncommon logic testing styles to make the code a
> bit more like other kernel code.
>
> e.g.:
> if (foo) {
> ;
> } else {
> <code>
> }
>
> is typically written
>
> if (!foo) {
> <code>
> }
>
A lot of times the excessive inversions seem to result in a net loss of
readability - e.g.:
<snip>
> diff --git a/drivers/nvdimm/region_devs.c
> b/drivers/nvdimm/region_devs.c
> index 65df07481909..6861e0997d21 100644
> --- a/drivers/nvdimm/region_devs.c
> +++ b/drivers/nvdimm/region_devs.c
> @@ -320,9 +320,7 @@ static ssize_t set_cookie_show(struct device *dev,
> struct nd_interleave_set *nd_set = nd_region->nd_set;
> ssize_t rc = 0;
>
> - if (is_memory(dev) && nd_set)
> - /* pass, should be precluded by region_visible */;
For one, the comment is lost
> - else
> + if (!(is_memory(dev) && nd_set))
And it takes a moment to resolve between things such as:
if (!(A && B))
vs.
if (!(A) && B)
And this is especially true if 'A' and 'B' are longer function calls,
split over multiple lines, or are themselves compound 'sections'.
I'm not opposed to /all/ such transformations -- for example, the ones
where the logical inversion can be 'consumed' by toggling a comparison
operator, as you have a few times in this patch, don't sacrifice any
readability, and perhaps even improve it.
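For instance (just a sketch, not lifted from the series -- 'remainder' is
only an illustrative variable), an inversion that gets absorbed by the
comparison operator stays obvious, while a negated compound condition makes
the reader stop and work out where the negation binds:

	/* negation folded into the comparison -- reads naturally */
	if (remainder != 0)
		return -EINVAL;

	/* negation wrapping a compound condition -- easy to misread
	 * as !(A) && B when skimming
	 */
	if (!(is_memory(dev) && nd_set))
		return -ENXIO;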
> return -ENXIO;
>
> /*
Hi Joe,
On Wed, Sep 11, 2019 at 7:55 PM Joe Perches <[email protected]> wrote:
>
> Rather than have a local coding style, use the typical kernel style.
I'd rather automate this. I'm going to do once-over with clang-format
and see what falls out.
On Thu, Sep 12, 2019 at 10:15 AM Joe Perches <[email protected]> wrote:
>
> I am adding Miguel Ojeda to the cc's.
Thanks Joe!
> Of course you are welcome to try it, but I believe that
> clang-format doesn't work all that well yet.
>
> It's more a work in progress rather than a "standard".
>
> I believe you'll find that the patch series I sent
> ends up with a rather more typical kernel style.
>
> I suggest you try to apply the series I sent and then
> run clang-format on that and see the differences.
Indeed, it is not there just yet. There are a few differences w.r.t.
the kernel style that aren't supported yet. However, for block/batch
conversions, it is very useful.
Luckily, one of the biggest ones (the consecutive macros alignment,
and we have a lot of them given this is C and a kernel) is going away
with LLVM 9 which is about to be released next week.
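To make that concrete (an illustrative, made-up block, not from the nvdimm
code), kernel headers often hand-align runs of defines like:

	#define FOO_SIG_LEN	16
	#define FOO_ALIGN	256
	#define FOO_SEQ_MASK	0x3

and clang-format before LLVM 9 collapses those columns down to single
spaces; if I recall correctly the new AlignConsecutiveMacros option is what
fixes that.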
> Ideally one day, some tool like clang-format
> might be locally applied by every developer for their
> own personal style, with some other neutral style for the
> content actually distributed.
If that day comes, I hope we can all agree to a single format and
apply it everywhere as other major projects have done. I think
agreeing to a given style is much, much easier for any of us when
formatting is fully automatic -- because at that point you don't need
to spend mental cycles (and memory!) on it. :-)
If I had to guess, I would say the path forward will start with some
subsystem maintainers starting to apply clang-format systematically on
their trees. That is why I think it is very useful that Dan tries it
out and lets us know his impressions.
Cheers,
Miguel
> static void append_badrange_entry(struct badrange *badrange,
> - struct badrange_entry *bre, u64 addr, u64 length)
> + struct badrange_entry *bre, u64 addr, u64 length)
Please stop sending this kind of crap. Two tabs are a very common
style used in a lot of the kernel, and some people actually prefer it.
Instead of arguing what is better just stick to what the surrounding
code does.
Or in other words: Feel free to be a codingstyle nazi for your code
(I am for some of mine), but leave others peoples code alone with
"cleanup" patches.
On Thu, Sep 12, 2019 at 7:06 AM Johannes Thumshirn <[email protected]> wrote:
>
> On 12/09/2019 16:00, Jeff Moyer wrote:
> > I'd rather avoid the churn and the risk of
> > introducing regressions. This will also make backports to stable more
> > of a pain, so it isn't without cost. Dan, is this really something you
> > want to do?
>
I'm 100% with Jeff on this!
Agree, see my other response here:
https://lore.kernel.org/r/CAPcyv4iu13D5P+ExdeW8OGMV8g49fMUy52xbYZM+bewwVSwhjg@mail.gmail.com/
On Thu, 2019-09-12 at 05:17 -0700, Christoph Hellwig wrote:
> Instead of arguing what is better just stick to what the surrounding
> code does.
That's not always feasible nor readable.
Especially for the logic inversion blocks where
the existing code does unreadable and error prone
things like hiding semicolons immediately after
comments.
if (foo)
/* longish comment */;
else {
<code>;
}
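A rewrite along these lines (just a sketch) keeps the comment and drops
the empty branch entirely:

	/* longish comment */
	if (!foo) {
		<code>;
	}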
> Or in other words: Feel free to be a codingstyle nazi for your code
> (I am for some of mine), but leave others peoples code alone with
> "cleanup" patches.
My point was to avoid documenting per-subsystem
coding style rules.
On Thu, 2019-09-12 at 01:00 -0700, Dan Williams wrote:
> Hi Joe,
>
> On Wed, Sep 11, 2019 at 7:55 PM Joe Perches <[email protected]> wrote:
> > Rather than have a local coding style, use the typical kernel style.
>
> I'd rather automate this. I'm going to do once-over with clang-format
> and see what falls out.
I am adding Miguel Ojeda to the cc's.
Of course you are welcome to try it, but I believe that
clang-format doesn't work all that well yet.
It's more a work in progress rather than a "standard".
I believe you'll find that the patch series I sent
ends up with a rather more typical kernel style.
I suggest you try to apply the series I sent and then
run clang-format on that and see the differences.
Ideally one day, some tool like clang-format
might be locally applied by every developer for their
own personal style, with some other neutral style for the
content actually distributed.
cheers, Joe
Joe Perches <[email protected]> writes:
> Rather than have a local coding style, use the typical kernel style.
The coding style isn't that different from the core kernel, and it's
still quite readable. I'd rather avoid the churn and the risk of
introducing regressions. This will also make backports to stable more
of a pain, so it isn't without cost. Dan, is this really something you
want to do?
-Jeff
>
> Joe Perches (13):
> nvdimm: Use more typical whitespace
> nvdimm: Move logical continuations to previous line
> nvdimm: Use octal permissions
> nvdimm: Use a more common kernel spacing style
> nvdimm: Use "unsigned int" in preference to "unsigned"
> nvdimm: Add and remove blank lines
> nvdimm: Use typical kernel brace styles
> nvdimm: Use typical kernel style indentation
> nvdimm: btt.h: Neaten #defines to improve readability
> nvdimm: namespace_devs: Move assignment operators
> nvdimm: Use more common logic testing styles and bare ; positions
> nvdimm: namespace_devs: Change progess typo to progress
> nvdimm: Miscellaneous neatening
>
> drivers/nvdimm/badrange.c | 22 +-
> drivers/nvdimm/blk.c | 39 ++--
> drivers/nvdimm/btt.c | 249 +++++++++++----------
> drivers/nvdimm/btt.h | 56 ++---
> drivers/nvdimm/btt_devs.c | 68 +++---
> drivers/nvdimm/bus.c | 138 ++++++------
> drivers/nvdimm/claim.c | 50 ++---
> drivers/nvdimm/core.c | 42 ++--
> drivers/nvdimm/dax_devs.c | 3 +-
> drivers/nvdimm/dimm.c | 3 +-
> drivers/nvdimm/dimm_devs.c | 107 ++++-----
> drivers/nvdimm/e820.c | 2 +-
> drivers/nvdimm/label.c | 213 +++++++++---------
> drivers/nvdimm/label.h | 6 +-
> drivers/nvdimm/namespace_devs.c | 472 +++++++++++++++++++++-------------------
> drivers/nvdimm/nd-core.h | 31 +--
> drivers/nvdimm/nd.h | 94 ++++----
> drivers/nvdimm/nd_virtio.c | 20 +-
> drivers/nvdimm/of_pmem.c | 6 +-
> drivers/nvdimm/pfn_devs.c | 136 ++++++------
> drivers/nvdimm/pmem.c | 57 ++---
> drivers/nvdimm/pmem.h | 2 +-
> drivers/nvdimm/region.c | 20 +-
> drivers/nvdimm/region_devs.c | 160 +++++++-------
> drivers/nvdimm/security.c | 138 ++++++------
> drivers/nvdimm/virtio_pmem.c | 10 +-
> 26 files changed, 1115 insertions(+), 1029 deletions(-)
On 12/09/2019 16:00, Jeff Moyer wrote:
> I'd rather avoid the churn and the risk of
> introducing regressions. This will also make backports to stable more
> of a pain, so it isn't without cost. Dan, is this really something you
> want to do?
I'm 100% with Jeff on this!
--
Johannes Thumshirn SUSE Labs Filesystems
[email protected] +49 911 74053 689
SUSE Software Solutions Germany GmbH
Maxfeldstr. 5
90409 Nürnberg
Germany
(HRB 247165, AG München)
Key fingerprint = EC38 9CAB C2C4 F25D 8600 D0D0 0393 969D 2D76 0850
On Thu, Sep 12, 2019 at 4:00 PM Jeff Moyer <[email protected]> wrote:
>
> Joe Perches <[email protected]> writes:
>
> > Rather than have a local coding style, use the typical kernel style.
>
> The coding style isn't that different from the core kernel, and it's
> still quite readable. I'd rather avoid the churn and the risk of
> introducing regressions. This will also make backports to stable more
> of a pain, so it isn't without cost. Dan, is this really something you
> want to do?
+1 As soon as you get accustomed to having formatting done and enforced
automatically, it is great. Other major projects have done so for
quite a while now.
If Dan doesn't think it is good enough, please let us know and, if it is
close enough, we can look at going for a newer LLVM to match the style
a bit more. Also note that one can disable formatting for some
sections of code if really needed.
Cheers,
Miguel
On Thu, 2019-09-12 at 16:21 +0200, Miguel Ojeda wrote:
> As soon as you get accustomed to having formatting done and enforced
> automatically, it is great. Other major projects have done so for
> quite a while now.
Please name the major projects and then point to their
.clang-format equivalents.
Also note the size/scope/complexity of the major projects.
thanks.
> If doesn't think it is good enough, please let us know and, if it is
> close enough, we can look at going for a newer LLVM to match the style
> a bit more.
I used the latest one, and quite a bit of the conversion
was unpleasant to read.
> Also note that one can disable formatting for some
> sections of code if really needed.
Marking sections _no_auto_format_ isn't really a
good solution is it?
On Thu, 2019-09-12 at 23:58 +0200, Miguel Ojeda wrote:
> On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
> > Please name the major projects and then point to their
> > .clang-format equivalents.
> >
> > Also note the size/scope/complexity of the major projects.
>
> Mozilla, WebKit, LLVM and Microsoft. They have their style distributed
> with the official clang-format, not sure if they enforce it.
>
> Same for Chromium/Chrome, but it looks like they indeed enforce it:
thanks for that list.
> > I used the latest one, and quite a bit of the conversion
> > was unpleasant to read.
>
> It would be good to see particularly bad snippets to see if we can do
> something about them (and, if needed, try to improve clang-format to
> support whatever we need).
As I mentioned earlier, look at the __stringify conversion.
Also the C() blocks.
btw: emacs 'mark-whole-buffer indent-region',
the tool I used for each file in patch 1, also
made a mess of the C() block.
> Did you tweak the parameters with the new ones?
No. I used
$ clang-format --version
clang-format version 10.0.0 (git://github.com/llvm/llvm-project.git 305b961f64b75e73110e309341535f6d5a48ed72)
and the existing .clang_format from
next-20190904 35394d031b710e832849fca60d0f53b513f0c390
> I am preparing an RFC
> patch for an updated .clang-format configuration that improves quite a
> bit the results w.r.t. the current one (and allows for some leeway
> on the developer's side, which helps prevent some cases too).
Well, one day no doubt an automated tool will be
more useful for the kernel. Hope you keep at it
and good luck.
> > Marking sections _no_auto_format_ isn't really a
> > good solution is it?
>
> I am thinking about special tables that are hand-crafted or very
> complex macros. For those, yes, I think it is a fine solution. That is
> why clang-format has that feature to begin with, and you can see an
> example in Mozilla's style guide which points here:
>
> https://github.com/mozilla/gecko-dev/blob/master/xpcom/io/nsEscape.cpp#L22
>
> Cheers,
> Miguel
On Thu, 2019-09-12 at 23:58 +0200, Miguel Ojeda wrote:
> On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
> > Please name the major projects and then point to their
> > .clang-format equivalents.
> >
> > Also note the size/scope/complexity of the major projects.
>
> Mozilla, WebKit, LLVM and Microsoft. They have their style distributed
> with the official clang-format, not sure if they enforce it.
At least for LLVM, it appears not.
I just tried a very small portion of the clang compiler:
$ git ls-files llvm/lib/CodeGen/ | wc -l
293
$ git ls-files llvm/lib/CodeGen/ | xargs clang-format -i
and got:
$ git diff --shortstat
245 files changed, 19519 insertions(+), 17794 deletions(-)
btw: that seems like a pretty small ~7% of the overall lines
$ git ls-files llvm/lib/CodeGen/ | xargs wc -l | tail -1
251034 total
On Thu, Sep 12, 2019 at 2:58 PM Miguel Ojeda
<[email protected]> wrote:
>
> On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
> >
> > Please name the major projects and then point to their
> > .clang-format equivalents.
> >
> > Also note the size/scope/complexity of the major projects.
>
> Mozilla, WebKit, LLVM and Microsoft. They have their style distributed
> with the official clang-format, not sure if they enforce it.
>
> Same for Chromium/Chrome, but it looks like they indeed enforce it:
>
> "A checkout should give you clang-format to automatically format C++
> code. By policy, Clang's formatting of code should always be accepted
> in code reviews."
>
> I would bet other Google projects do so as well (since Chandler
> Carruth has been giving talks about clang-format for 7+ years). Nick?
So Google3 (the internal monorepo that Android, Chromium, ChromiumOS,
Fuchsia are not a part of) is pretty sweet. You cannot even post code
unless the linter has been run on it (presubmit hook), which for our
~350 millions LoC of C++ is clang-format. If you bypass local
presubmit hooks, our code review tool ("critique") won't let you
submit code that fails lint presubmit checks. I suspect the initial
conversion was probably committed by bots.
>
> I hope those are major enough. There is also precedent in other
> languages (e.g. Java, C#, Rust).
Yep! Other people coming to C/C++ from these languages find the
discussion about tabs vs spaces to be highly entertaining! When you
have an automated code formatter and an agreed upon coding style (and
hopefully enforcement), you save so much time from avoided bikesheds!
Don't like the codebase's coding style? Then write the code how you
like and just run the formatter when you're done (might not help with
conventions though, maybe that's where checkpatch.pl can shine).
Done! No more wasted time on what color to paint the bikeshed!
--
Thanks,
~Nick Desaulniers
On Thu, Sep 12, 2019 at 3:38 PM Joe Perches <[email protected]> wrote:
>
> On Thu, 2019-09-12 at 23:58 +0200, Miguel Ojeda wrote:
> > On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
> > > Please name the major projects and then point to their
> > > .clang-format equivalents.
> > >
> > > Also note the size/scope/complexity of the major projects.
> >
> > Mozilla, WebKit, LLVM and Microsoft. They have their style distributed
> > with the official clang-format, not sure if they enforce it.
>
> At least for LLVM, it appears not.
I acknowledge the irony you present, but that's because there's no
enforcement on the LLVM side. I frequently forget to run:
$ git-clang-format HEAD~
If you have automated systems that help encourage (ie. force) the use
of the formatter, this helps.
Consider the fact that not all kernel developers run checkpatch.pl.
Is that a deficiency in checkpatch.pl, or the lack of enforcement in
kernel developers' workflows?
--
Thanks,
~Nick Desaulniers
On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
>
> Please name the major projects and then point to their
> .clang-format equivalents.
>
> Also note the size/scope/complexity of the major projects.
Mozilla, WebKit, LLVM and Microsoft. They have their style distributed
with the official clang-format, not sure if they enforce it.
Same for Chromium/Chrome, but it looks like they indeed enforce it:
"A checkout should give you clang-format to automatically format C++
code. By policy, Clang's formatting of code should always be accepted
in code reviews."
I would bet other Google projects do so as well (since Chandler
Carruth has been giving talks about clang-format for 7+ years). Nick?
I hope those are major enough. There is also precedent in other
languages (e.g. Java, C#, Rust).
> I used the latest one, and quite a bit of the conversion
> was unpleasant to read.
It would be good to see particularly bad snippets to see if we can do
something about them (and, if needed, try to improve clang-format to
support whatever we need).
Did you tweak the parameters with the new ones? I am preparing an RFC
patch for an updated .clang-format configuration that improves quite a
bit the results w.r.t. the current one (and allows for some leeway
on the developer's side, which helps prevent some cases too).
> Marking sections _no_auto_format_ isn't really a
> good solution is it?
I am thinking about special tables that are hand-crafted or very
complex macros. For those, yes, I think it is a fine solution. That is
why clang-format has that feature to begin with, and you can see an
example in Mozilla's style guide which points here:
https://github.com/mozilla/gecko-dev/blob/master/xpcom/io/nsEscape.cpp#L22
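In C the marker pair looks like this (the table itself is a made-up
example; the two comments are the actual clang-format directives, and
everything between them is left untouched by the tool):

	/* clang-format off */
	static const char hex_digits[16] = {
		'0', '1', '2', '3',  '4', '5', '6', '7',
		'8', '9', 'a', 'b',  'c', 'd', 'e', 'f',
	};
	/* clang-format on */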
Cheers,
Miguel
On Thu, 2019-09-12 at 16:00 -0700, Nick Desaulniers wrote:
> Consider the fact that not all kernel developers run checkpatch.pl.
> Is that a deficiency in checkpatch.pl, or the lack of enforcement in
> kernel developers' workflows?
No. Mostly it's because the kernel is like a bunch of little
untethered development planets, each with a little prince that
wants to keep their own little fiefdom separate from the others.
On Thu, 2019-09-12 at 23:58 +0200, Miguel Ojeda wrote:
> On Thu, Sep 12, 2019 at 11:08 PM Joe Perches <[email protected]> wrote:
> > Marking sections _no_auto_format_ isn't really a
> > good solution is it?
>
> I am thinking about special tables that are hand-crafted or very
> complex macros. For those, yes, I think it is a fine solution.
Can the 'clang-format on/off' trigger be indirected into
something non-clang specific via a macro?
Not every project is going to use only the clang-format tool.
On Fri, Sep 13, 2019 at 1:26 AM Joe Perches <[email protected]> wrote:
>
> Not every project is going to use only the clang-format tool.
Why? The end goal would be to enforce all code to be running under the
same formatting rules (which, in practice, means the same tool at the
moment).
Note that you can use clang-format with most editors (including vim,
emacs, VS, VSCode, XCode, Sublime, Atom...).
Cheers,
Miguel
On Thu, Sep 12, 2019 at 08:01:45AM -0700, Joe Perches wrote:
> On Thu, 2019-09-12 at 05:17 -0700, Christoph Hellwig wrote:
> > Instead of arguing what is better just stick to what the surrounding
> > code does.
>
> That's not always feasible nor readable.
>
> Especially for the logic inversion blocks where
> the existing code does unreadable and error prone
> things like hiding semicolons immediately after
> comments.
>
> if (foo)
> /* longish comment */;
> else {
> <code>;
> }
Which has nothing to do with your patch.
> > Or in other words: Feel free to be a codingstyle nazi for your code
> > (I am for some of mine), but leave others peoples code alone with
> > "cleanup" patches.
>
> My point was to avoid documenting per-subsystem
> coding style rules.
It is called common sense. In many cases different parts of the
subsystem might have slight variations. Just stick to your
preferred style in the bounds of coding style. Maintainers will
either remind you if they feel strongly that they have a slightly
different preference or just fix it up. What we really don't need
is whitespace cleanup patches in the micro variation area.