author	Alexey Dobriyan <adobriyan@gmail.com>	2016-12-12 16:45:28 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-12-12 18:55:09 -0800
commit	bac5f5d56bbcb0ef7d3a926dd28b5f1db09117b7 (patch)
tree	9f23c35f9c44ba0d21ff24db07c9da6afaa0bc28 /fs/proc
parent	209b14dc030760d3a17029a5c3bd92c9d6fd3f37 (diff)
fs/proc/base.c: save decrement during lookup/readdir in /proc/$PID
Comparison with "<" works just as well as comparison with "<=", but it saves one SUB/LEA (no, it is not optimised away, at least here).

Link: http://lkml.kernel.org/r/20161122195143.GA29812@avx2
Signed-off-by: Alexey Dobriyan <adobriyan@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
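For illustration, a minimal standalone sketch of the idiom the patch switches to (the struct layout and the find_entry() helper are simplified stand-ins, not the kernel code): with an end-exclusive bound last = &ents[nents], the loop tests p < last, so &ents[nents - 1] never has to be materialised with an extra SUB/LEA.

#include <string.h>

/* Simplified stand-in for the kernel's pid_entry table entries. */
struct pid_entry {
	const char *name;
	unsigned int len;
};

/*
 * End-exclusive scan: "last" points one past the final entry, so the
 * loop condition is "p < last" and "nents - 1" is never computed.
 */
static const struct pid_entry *find_entry(const struct pid_entry *ents,
					  unsigned int nents,
					  const char *name, unsigned int len)
{
	const struct pid_entry *p, *last = &ents[nents];

	for (p = ents; p < last; p++) {
		if (p->len == len && !memcmp(name, p->name, len))
			return p;
	}
	return NULL;	/* the "p >= last" case, i.e. the "goto out" path */
}

Either form visits the same nents entries; only the way the end bound is computed differs.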
Diffstat (limited to 'fs/proc')
-rw-r--r--	fs/proc/base.c	8
1 file changed, 4 insertions, 4 deletions
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 7c843024b406..04a5fcad4c34 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -2412,14 +2412,14 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
 	 * Yes, it does not scale. And it should not. Don't add
 	 * new entries into /proc/<tgid>/ without very good reasons.
 	 */
-	last = &ents[nents - 1];
-	for (p = ents; p <= last; p++) {
+	last = &ents[nents];
+	for (p = ents; p < last; p++) {
 		if (p->len != dentry->d_name.len)
 			continue;
 		if (!memcmp(dentry->d_name.name, p->name, p->len))
 			break;
 	}
-	if (p > last)
+	if (p >= last)
 		goto out;
 
 	error = proc_pident_instantiate(dir, dentry, task, p);
@@ -2444,7 +2444,7 @@ static int proc_pident_readdir(struct file *file, struct dir_context *ctx,
 	if (ctx->pos >= nents + 2)
 		goto out;
 
-	for (p = ents + (ctx->pos - 2); p <= ents + nents - 1; p++) {
+	for (p = ents + (ctx->pos - 2); p < ents + nents; p++) {
 		if (!proc_fill_cache(file, ctx, p->name, p->len,
 				proc_pident_instantiate, task, p))
 			break;
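The readdir hunk uses the same end-exclusive bound while resuming from a saved directory position; the "- 2" accounts for the "." and ".." entries that occupy positions 0 and 1 ahead of the table. A rough sketch of that shape, with a hypothetical emit() callback standing in for proc_fill_cache() and under the assumption that the caller has already emitted the dot entries (so *pos >= 2 on entry):

#include <stdbool.h>
#include <stddef.h>

struct pid_entry {
	const char *name;
	unsigned int len;
};

/*
 * Resume an end-exclusive scan at table index (*pos - 2); positions 0
 * and 1 belong to "." and "..", which the caller is assumed to have
 * emitted already.
 */
static void scan_from(const struct pid_entry *ents, unsigned int nents,
		      size_t *pos, bool (*emit)(const struct pid_entry *))
{
	const struct pid_entry *p;

	if (*pos >= nents + 2)
		return;

	for (p = ents + (*pos - 2); p < ents + nents; p++) {
		if (!emit(p))	/* stop when the consumer cannot take more */
			break;
		(*pos)++;
	}
}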