Skip to content

Commit ff3f711

Browse files
Sweet Tea Dorminy (sweettea) authored; Mikulas Patocka committed
dm vdo: remove remaining ring references
Lists are the new rings, so update all remaining references to rings to talk about lists.

Signed-off-by: Sweet Tea Dorminy <[email protected]>
Signed-off-by: Matthew Sakai <[email protected]>
Signed-off-by: Mikulas Patocka <[email protected]>
1 parent 51ba14f commit ff3f711

File tree

7 files changed

+22
-22
lines changed

7 files changed

+22
-22
lines changed

drivers/md/dm-vdo/block-map.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -451,7 +451,7 @@ static struct page_info * __must_check find_page(struct vdo_page_cache *cache,
451451
* select_lru_page() - Determine which page is least recently used.
452452
*
453453
* Picks the least recently used from among the non-busy entries at the front of each of the lru
454-
* ring. Since whenever we mark a page busy we also put it to the end of the ring it is unlikely
454+
* list. Since whenever we mark a page busy we also put it to the end of the list it is unlikely
455455
* that the entries at the front are busy unless the queue is very short, but not impossible.
456456
*
457457
* Return: A pointer to the info structure for a relevant page, or NULL if no such page can be

drivers/md/dm-vdo/dedupe.c

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -226,7 +226,7 @@ struct hash_lock {
226226
* A list containing the data VIOs sharing this lock, all having the same record name and
227227
* data block contents, linked by their hash_lock_node fields.
228228
*/
229-
struct list_head duplicate_ring;
229+
struct list_head duplicate_vios;
230230

231231
/* The number of data_vios sharing this lock instance */
232232
data_vio_count_t reference_count;
@@ -343,7 +343,7 @@ static void return_hash_lock_to_pool(struct hash_zone *zone, struct hash_lock *l
343343
{
344344
memset(lock, 0, sizeof(*lock));
345345
INIT_LIST_HEAD(&lock->pool_node);
346-
INIT_LIST_HEAD(&lock->duplicate_ring);
346+
INIT_LIST_HEAD(&lock->duplicate_vios);
347347
vdo_waitq_init(&lock->waiters);
348348
list_add_tail(&lock->pool_node, &zone->lock_pool);
349349
}
@@ -441,7 +441,7 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
441441
VDO_ASSERT_LOG_ONLY(data_vio->hash_zone != NULL,
442442
"must have a hash zone when holding a hash lock");
443443
VDO_ASSERT_LOG_ONLY(!list_empty(&data_vio->hash_lock_entry),
444-
"must be on a hash lock ring when holding a hash lock");
444+
"must be on a hash lock list when holding a hash lock");
445445
VDO_ASSERT_LOG_ONLY(old_lock->reference_count > 0,
446446
"hash lock reference must be counted");
447447

@@ -464,10 +464,10 @@ static void set_hash_lock(struct data_vio *data_vio, struct hash_lock *new_lock)
464464

465465
if (new_lock != NULL) {
466466
/*
467-
* Keep all data_vios sharing the lock on a ring since they can complete in any
467+
* Keep all data_vios sharing the lock on a list since they can complete in any
468468
* order and we'll always need a pointer to one to compare data.
469469
*/
470-
list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_ring);
470+
list_move_tail(&data_vio->hash_lock_entry, &new_lock->duplicate_vios);
471471
new_lock->reference_count += 1;
472472
if (new_lock->max_references < new_lock->reference_count)
473473
new_lock->max_references = new_lock->reference_count;
@@ -1789,10 +1789,10 @@ static bool is_hash_collision(struct hash_lock *lock, struct data_vio *candidate
17891789
struct hash_zone *zone;
17901790
bool collides;
17911791

1792-
if (list_empty(&lock->duplicate_ring))
1792+
if (list_empty(&lock->duplicate_vios))
17931793
return false;
17941794

1795-
lock_holder = list_first_entry(&lock->duplicate_ring, struct data_vio,
1795+
lock_holder = list_first_entry(&lock->duplicate_vios, struct data_vio,
17961796
hash_lock_entry);
17971797
zone = candidate->hash_zone;
17981798
collides = !blocks_equal(lock_holder->vio.data, candidate->vio.data);
@@ -1815,7 +1815,7 @@ static inline int assert_hash_lock_preconditions(const struct data_vio *data_vio
18151815
return result;
18161816

18171817
result = VDO_ASSERT(list_empty(&data_vio->hash_lock_entry),
1818-
"must not already be a member of a hash lock ring");
1818+
"must not already be a member of a hash lock list");
18191819
if (result != VDO_SUCCESS)
18201820
return result;
18211821

@@ -1942,8 +1942,8 @@ void vdo_release_hash_lock(struct data_vio *data_vio)
19421942
"returned hash lock must not be in use with state %s",
19431943
get_hash_lock_state_name(lock->state));
19441944
VDO_ASSERT_LOG_ONLY(list_empty(&lock->pool_node),
1945-
"hash lock returned to zone must not be in a pool ring");
1946-
VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_ring),
1945+
"hash lock returned to zone must not be in a pool list");
1946+
VDO_ASSERT_LOG_ONLY(list_empty(&lock->duplicate_vios),
19471947
"hash lock returned to zone must not reference DataVIOs");
19481948

19491949
return_hash_lock_to_pool(zone, lock);

drivers/md/dm-vdo/packer.h

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -46,7 +46,7 @@ struct compressed_block {
4646

4747
/*
4848
* Each packer_bin holds an incomplete batch of data_vios that only partially fill a compressed
49-
* block. The bins are kept in a ring sorted by the amount of unused space so the first bin with
49+
* block. The bins are kept in a list sorted by the amount of unused space so the first bin with
5050
* enough space to hold a newly-compressed data_vio can easily be found. When the bin fills up or
5151
* is flushed, the first uncanceled data_vio in the bin is selected to be the agent for that bin.
5252
* Upon entering the packer, each data_vio already has its compressed data in the first slot of the

drivers/md/dm-vdo/priority-table.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -199,7 +199,7 @@ void vdo_priority_table_remove(struct priority_table *table, struct list_head *e
199199

200200
/*
201201
* Remove the entry from the bucket list, remembering a pointer to another entry in the
202-
* ring.
202+
* list.
203203
*/
204204
next_entry = entry->next;
205205
list_del_init(entry);

drivers/md/dm-vdo/recovery-journal.h

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -43,9 +43,9 @@
4343
* has a vio which is used to commit that block to disk. The vio's data is the on-disk
4444
* representation of the journal block. In addition each in-memory block has a buffer which is used
4545
* to accumulate entries while a partial commit of the block is in progress. In-memory blocks are
46-
* kept on two rings. Free blocks live on the 'free_tail_blocks' ring. When a block becomes active
47-
* (see below) it is moved to the 'active_tail_blocks' ring. When a block is fully committed, it is
48-
* moved back to the 'free_tail_blocks' ring.
46+
* kept on two lists. Free blocks live on the 'free_tail_blocks' list. When a block becomes active
47+
* (see below) it is moved to the 'active_tail_blocks' list. When a block is fully committed, it is
48+
* moved back to the 'free_tail_blocks' list.
4949
*
5050
* When entries are added to the journal, they are added to the active in-memory block, as
5151
* indicated by the 'active_block' field. If the caller wishes to wait for the entry to be

drivers/md/dm-vdo/slab-depot.c

Lines changed: 5 additions & 5 deletions
Original file line number | Diff line number | Diff line change
@@ -139,7 +139,7 @@ static bool is_slab_journal_blank(const struct vdo_slab *slab)
139139
}
140140

141141
/**
142-
* mark_slab_journal_dirty() - Put a slab journal on the dirty ring of its allocator in the correct
142+
* mark_slab_journal_dirty() - Put a slab journal on the dirty list of its allocator in the correct
143143
* order.
144144
* @journal: The journal to be marked dirty.
145145
* @lock: The recovery journal lock held by the slab journal.
@@ -821,7 +821,7 @@ static void commit_tail(struct slab_journal *journal)
821821

822822
/*
823823
* Since we are about to commit the tail block, this journal no longer needs to be on the
824-
* ring of journals which the recovery journal might ask to commit.
824+
* list of journals which the recovery journal might ask to commit.
825825
*/
826826
mark_slab_journal_clean(journal);
827827

@@ -1371,7 +1371,7 @@ static unsigned int calculate_slab_priority(struct vdo_slab *slab)
13711371
static void prioritize_slab(struct vdo_slab *slab)
13721372
{
13731373
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
1374-
"a slab must not already be on a ring when prioritizing");
1374+
"a slab must not already be on a list when prioritizing");
13751375
slab->priority = calculate_slab_priority(slab);
13761376
vdo_priority_table_enqueue(slab->allocator->prioritized_slabs,
13771377
slab->priority, &slab->allocq_entry);
@@ -2562,7 +2562,7 @@ static void queue_slab(struct vdo_slab *slab)
25622562
int result;
25632563

25642564
VDO_ASSERT_LOG_ONLY(list_empty(&slab->allocq_entry),
2565-
"a requeued slab must not already be on a ring");
2565+
"a requeued slab must not already be on a list");
25662566

25672567
if (vdo_is_read_only(allocator->depot->vdo))
25682568
return;
@@ -3297,7 +3297,7 @@ int vdo_release_block_reference(struct block_allocator *allocator,
32973297
* This is a min_heap callback function orders slab_status structures using the 'is_clean' field as
32983298
* the primary key and the 'emptiness' field as the secondary key.
32993299
*
3300-
* Slabs need to be pushed onto the rings in the same order they are to be popped off. Popping
3300+
* Slabs need to be pushed onto the lists in the same order they are to be popped off. Popping
33013301
* should always get the most empty first, so pushing should be from most empty to least empty.
33023302
* Thus, the ordering is reversed from the usual sense since min_heap returns smaller elements
33033303
* before larger ones.

drivers/md/dm-vdo/wait-queue.c

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -34,7 +34,7 @@ void vdo_waitq_enqueue_waiter(struct vdo_wait_queue *waitq, struct vdo_waiter *w
3434
waitq->last_waiter->next_waiter = waiter;
3535
}
3636

37-
/* In both cases, the waiter we added to the ring becomes the last waiter. */
37+
/* In both cases, the waiter we added to the list becomes the last waiter. */
3838
waitq->last_waiter = waiter;
3939
waitq->length += 1;
4040
}

0 commit comments

Comments (0)