dm mpath: replace spin_lock_irqsave with spin_lock_irq

jira LE-3526
Rebuild_History Non-Buildable kernel-6.12.0-55.20.1.el10_0
commit-author Mikulas Patocka <[email protected]>
commit 050a3e71ce24c6f18d70679d68056f76375ff51c
Empty-Commit: Cherry-Pick Conflicts during history rebuild.
Will be included in final tarball splat. Ref for failed cherry-pick at:
ciq/ciq_backports/kernel-6.12.0-55.20.1.el10_0/050a3e71.failed

Replace spin_lock_irqsave/spin_unlock_irqrestore with
spin_lock_irq/spin_unlock_irq at places where it is known that interrupts
are enabled.
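
As a side note (not part of the patch), a minimal sketch of the locking
pattern being changed, assuming plain process context where interrupts
are known to be enabled on entry:

	/* Before: saves the current IRQ state and restores it on unlock,
	 * so it is safe even if the caller already disabled interrupts.
	 */
	unsigned long flags;

	spin_lock_irqsave(&m->lock, flags);
	/* ... critical section ... */
	spin_unlock_irqrestore(&m->lock, flags);

	/* After: slightly cheaper, but spin_unlock_irq() unconditionally
	 * re-enables interrupts, so this is only correct where interrupts
	 * are guaranteed to be enabled before the lock is taken.
	 */
	spin_lock_irq(&m->lock);
	/* ... critical section ... */
	spin_unlock_irq(&m->lock);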

	Signed-off-by: Mikulas Patocka <[email protected]>
	Signed-off-by: Benjamin Marzinski <[email protected]>
(cherry picked from commit 050a3e71ce24c6f18d70679d68056f76375ff51c)
	Signed-off-by: Jonathan Maple <[email protected]>

# Conflicts:
#	drivers/md/dm-mpath.c
diff --cc drivers/md/dm-mpath.c
index 368606afb6f0,81fec2e1e0ef..000000000000
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@@ -1476,11 -1478,15 +1469,11 @@@ static int switch_pg_num(struct multipa
  		if (--pgnum)
  			continue;

 -		if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags))
 -			set_bit(MPATHF_NEED_PG_SWITCH, &m->flags);
 -		else {
 -			m->current_pgpath = NULL;
 -			m->current_pg = NULL;
 -		}
 +		m->current_pgpath = NULL;
 +		m->current_pg = NULL;
  		m->next_pg = pg;
  	}
- 	spin_unlock_irqrestore(&m->lock, flags);
+ 	spin_unlock_irq(&m->lock);

  	schedule_work(&m->trigger_event);
  	return 0;
@@@ -1742,6 -1748,9 +1735,12 @@@ static void multipath_presuspend(struc
  {
  	struct multipath *m = ti->private;

++<<<<<<< HEAD
++=======
+ 	spin_lock_irq(&m->lock);
+ 	m->is_suspending = true;
+ 	spin_unlock_irq(&m->lock);
++>>>>>>> 050a3e71ce24 (dm mpath: replace spin_lock_irqsave with spin_lock_irq)
  	/* FIXME: bio-based shouldn't need to always disable queue_if_no_path */
  	if (m->queue_mode == DM_TYPE_BIO_BASED || !dm_noflush_suspending(m->ti))
  		queue_if_no_path(m, false, true, __func__);
@@@ -1762,9 -1771,9 +1761,13 @@@ static void multipath_postsuspend(struc
  static void multipath_resume(struct dm_target *ti)
  {
  	struct multipath *m = ti->private;
- 	unsigned long flags;

++<<<<<<< HEAD
 +	spin_lock_irqsave(&m->lock, flags);
++=======
+ 	spin_lock_irq(&m->lock);
+ 	m->is_suspending = false;
++>>>>>>> 050a3e71ce24 (dm mpath: replace spin_lock_irqsave with spin_lock_irq)
  	if (test_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags)) {
  		set_bit(MPATHF_QUEUE_IF_NO_PATH, &m->flags);
  		clear_bit(MPATHF_SAVED_QUEUE_IF_NO_PATH, &m->flags);
@@@ -2021,6 -2028,113 +2022,116 @@@ out
  	return r;
  }

++<<<<<<< HEAD
++=======
+ /*
+  * Perform a minimal read from the given path to find out whether the
+  * path still works. If a path error occurs, fail it.
+  */
+ static int probe_path(struct pgpath *pgpath)
+ {
+ 	struct block_device *bdev = pgpath->path.dev->bdev;
+ 	unsigned int read_size = bdev_logical_block_size(bdev);
+ 	struct page *page;
+ 	struct bio *bio;
+ 	blk_status_t status;
+ 	int r = 0;
+
+ 	if (WARN_ON_ONCE(read_size > PAGE_SIZE))
+ 		return -EINVAL;
+
+ 	page = alloc_page(GFP_KERNEL);
+ 	if (!page)
+ 		return -ENOMEM;
+
+ 	/* Perform a minimal read: Sector 0, length read_size */
+ 	bio = bio_alloc(bdev, 1, REQ_OP_READ, GFP_KERNEL);
+ 	if (!bio) {
+ 		r = -ENOMEM;
+ 		goto out;
+ 	}
+
+ 	bio->bi_iter.bi_sector = 0;
+ 	__bio_add_page(bio, page, read_size, 0);
+ 	submit_bio_wait(bio);
+ 	status = bio->bi_status;
+ 	bio_put(bio);
+
+ 	if (status && blk_path_error(status))
+ 		fail_path(pgpath);
+
+ out:
+ 	__free_page(page);
+ 	return r;
+ }
+
+ /*
+  * Probe all active paths in current_pg to find out whether they still work.
+  * Fail all paths that do not work.
+  *
+  * Return -ENOTCONN if no valid path is left (even outside of current_pg). We
+  * cannot probe paths in other pgs without switching current_pg, so if valid
+  * paths are only in different pgs, they may or may not work. Additionally
+  * we should not probe paths in a pathgroup that is in the process of
+  * Initializing. Userspace can submit a request and we'll switch and wait
+  * for the pathgroup to be initialized. If the request fails, it may need to
+  * probe again.
+  */
+ static int probe_active_paths(struct multipath *m)
+ {
+ 	struct pgpath *pgpath;
+ 	struct priority_group *pg = NULL;
+ 	int r = 0;
+
+ 	spin_lock_irq(&m->lock);
+ 	if (test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags)) {
+ 		wait_event_lock_irq(m->probe_wait,
+ 				    !test_bit(MPATHF_DELAY_PG_SWITCH, &m->flags),
+ 				    m->lock);
+ 		/*
+ 		 * if we waited because a probe was already in progress,
+ 		 * and it probed the current active pathgroup, don't
+ 		 * reprobe. Just return the number of valid paths
+ 		 */
+ 		if (m->current_pg == m->last_probed_pg)
+ 			goto skip_probe;
+ 	}
+ 	if (!m->current_pg || m->is_suspending ||
+ 	    test_bit(MPATHF_QUEUE_IO, &m->flags))
+ 		goto skip_probe;
+ 	set_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ 	pg = m->last_probed_pg = m->current_pg;
+ 	spin_unlock_irq(&m->lock);
+
+ 	list_for_each_entry(pgpath, &pg->pgpaths, list) {
+ 		if (pg != READ_ONCE(m->current_pg) ||
+ 		    READ_ONCE(m->is_suspending))
+ 			goto out;
+ 		if (!pgpath->is_active)
+ 			continue;
+
+ 		r = probe_path(pgpath);
+ 		if (r < 0)
+ 			goto out;
+ 	}
+
+ out:
+ 	spin_lock_irq(&m->lock);
+ 	clear_bit(MPATHF_DELAY_PG_SWITCH, &m->flags);
+ 	if (test_and_clear_bit(MPATHF_NEED_PG_SWITCH, &m->flags)) {
+ 		m->current_pgpath = NULL;
+ 		m->current_pg = NULL;
+ 	}
+ skip_probe:
+ 	if (r == 0 && !atomic_read(&m->nr_valid_paths))
+ 		r = -ENOTCONN;
+ 	spin_unlock_irq(&m->lock);
+ 	if (pg)
+ 		wake_up(&m->probe_wait);
+ 	return r;
+ }
+
++>>>>>>> 050a3e71ce24 (dm mpath: replace spin_lock_irqsave with spin_lock_irq)
  static int multipath_prepare_ioctl(struct dm_target *ti,
  				   struct block_device **bdev,
  				   unsigned int cmd, unsigned long arg,
@@@ -2028,9 -2142,18 +2139,8 @@@
  {
  	struct multipath *m = ti->private;
  	struct pgpath *pgpath;
- 	unsigned long flags;
  	int r;

 -	if (_IOC_TYPE(cmd) == DM_IOCTL) {
 -		*forward = false;
 -		switch (cmd) {
 -		case DM_MPATH_PROBE_PATHS:
 -			return probe_active_paths(m);
 -		default:
 -			return -ENOTTY;
 -		}
 -	}
 -
  	pgpath = READ_ONCE(m->current_pgpath);
  	if (!pgpath || !mpath_double_check_test_bit(MPATHF_QUEUE_IO, m))
  		pgpath = choose_pgpath(m, 0);
* Unmerged path drivers/md/dm-mpath.c
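
The probe_active_paths() hunk above leans on wait_event_lock_irq() from
<linux/wait.h>, which drops a held spinlock while sleeping and reacquires
it before rechecking the condition. A minimal sketch of that pattern under
spin_lock_irq(); the names waiter, waker, waitq, and cond are illustrative,
only the macro itself is the real API:

	#include <linux/spinlock.h>
	#include <linux/wait.h>

	static DEFINE_SPINLOCK(lock);
	static DECLARE_WAIT_QUEUE_HEAD(waitq);
	static bool cond;	/* set true by the waker under 'lock' */

	static void waiter(void)
	{
		spin_lock_irq(&lock);
		/* Releases 'lock' while sleeping, retakes it to recheck 'cond'. */
		wait_event_lock_irq(waitq, cond, lock);
		/* 'lock' is held again here and 'cond' is true. */
		spin_unlock_irq(&lock);
	}

	static void waker(void)
	{
		spin_lock_irq(&lock);
		cond = true;
		spin_unlock_irq(&lock);
		wake_up(&waitq);	/* as the diff does with m->probe_wait */
	}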