bluestore_block_db_size ignored
Ingo Hellmer
18 Posts
July 10, 2023, 6:15 am

I am using PetaSAN 3.1. I was successful on three OSDs; I will try the other three when I have the next maintenance slot.
Here are the values before:
ceph daemon osd.6 perf dump | jq '{bluefs:.bluefs}'
{
  "bluefs": {
    "gift_bytes": 0,
    "reclaim_bytes": 0,
    "db_total_bytes": 794567872512,
    "db_used_bytes": 42356150272,
    "wal_total_bytes": 0,
    "wal_used_bytes": 0,
    "slow_total_bytes": 351843581952,
    "slow_used_bytes": 66498461696,
    "num_files": 1730,
    "log_bytes": 9670656,
    "log_compactions": 29,
    "logged_bytes": 465784832,
    "files_written_wal": 2,
    "files_written_sst": 8760,
    "bytes_written_wal": 187023945728,
    "bytes_written_sst": 514929278976,
    "bytes_written_slow": 0,
    "max_bytes_wal": 0,
    "max_bytes_db": 46491734016,
    "max_bytes_slow": 0,
    "read_random_count": 30085874,
    "read_random_bytes": 707852574430,
    "read_random_disk_count": 12452426,
    "read_random_disk_bytes": 621318558952,
    "read_random_buffer_count": 17751574,
    "read_random_buffer_bytes": 86534015478,
    "read_count": 6727080,
    "read_bytes": 185227070725,
    "read_prefetch_count": 6716055,
    "read_prefetch_bytes": 184865965093,
    "read_zeros_candidate": 0,
    "read_zeros_errors": 0
  }
}
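
For reference, a manual compaction like this can be triggered with the standard Ceph commands, either from any admin node:

ceph tell osd.6 compact

or on the host running the OSD, through its admin socket:

ceph daemon osd.6 compact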
and after the compaction:
ceph daemon osd.6 perf dump | jq '{bluefs:.bluefs}'
{
  "bluefs": {
    "gift_bytes": 0,
    "reclaim_bytes": 0,
    "db_total_bytes": 794567872512,
    "db_used_bytes": 92989788160,
    "wal_total_bytes": 0,
    "wal_used_bytes": 0,
    "slow_total_bytes": 351843581952,
    "slow_used_bytes": 0,
    "num_files": 1362,
    "log_bytes": 2453504,
    "log_compactions": 0,
    "logged_bytes": 1085440,
    "files_written_wal": 1,
    "files_written_sst": 0,
    "bytes_written_wal": 44199936,
    "bytes_written_sst": 0,
    "bytes_written_slow": 0,
    "max_bytes_wal": 0,
    "max_bytes_db": 92989788160,
    "max_bytes_slow": 0,
    "read_random_count": 33987,
    "read_random_bytes": 166101580,
    "read_random_disk_count": 1540,
    "read_random_disk_bytes": 47363313,
    "read_random_buffer_count": 32448,
    "read_random_buffer_bytes": 118738267,
    "read_count": 2531,
    "read_bytes": 143946259,
    "read_prefetch_count": 2297,
    "read_prefetch_bytes": 137845150,
    "read_zeros_candidate": 0,
    "read_zeros_errors": 0
  }
}
I think slow_used_bytes is the value that shows the spillover; it dropped from 66498461696 to 0 after the compaction.
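
For anyone checking the remaining OSDs the same way, a short loop over the admin sockets pulls out just the spillover counter (the OSD IDs below are placeholders; ceph daemon must be run on the host where each OSD lives):

for id in 0 1 2 3 4 5; do
  echo -n "osd.$id slow_used_bytes: "
  ceph daemon osd.$id perf dump | jq '.bluefs.slow_used_bytes'
done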
admin
2,918 Posts
July 10, 2023, 3:54 pm

Very good, the manual compaction fixed it. Note that the OSD performs db compaction regularly on its own, so this kind of spillover should also resolve by itself over time.
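
Depending on the Ceph version, spillover is also flagged directly in cluster health as a BLUEFS_SPILLOVER warning, so a quick way to watch for it without dumping perf counters is:

ceph health detail | grep -i spillover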