Commit 7455d9a

fix: use auto-detected sector size for blockdev
fix: use auto-detected sector size for blockdev
This fixes the behaviour where we pass 512 as the sector size if the disk URI doesn't contain a blk_size parameter, which causes pool creation to fail when the underlying disk has a different sector size, e.g. 4096. Instead of passing 512, we now pass 0, which lets SPDK detect the device's sector size and use that value.

Signed-off-by: Diwakar Sharma <diwakar.sharma@datacore.com>
1 parent a7e00d7 commit 7455d9a
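
In effect, a URI without a blk_size parameter now defaults to 0 for block devices and 512 for regular files. A minimal sketch of that decision, assuming a standalone helper (default_blk_size is hypothetical, not code from this commit):

    use std::{fs, os::unix::fs::FileTypeExt};

    /// Hypothetical helper mirroring the new fallback: block devices get 0,
    /// which SPDK's aio/uring bdevs treat as "probe the device's logical
    /// sector size"; regular files keep the historical 512-byte default.
    fn default_blk_size(path: &str) -> u32 {
        let is_blockdev = fs::metadata(path)
            .map(|m| m.file_type().is_block_device())
            .unwrap_or(false);
        if is_blockdev {
            0
        } else {
            512
        }
    }

    // e.g. default_blk_size("/dev/loop0") == 0; default_blk_size("/tmp/disk1.img") == 512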

5 files changed (+195 -28)

io-engine-tests/src/lib.rs (+22)

@@ -188,6 +188,28 @@ pub fn truncate_file_bytes(path: &str, size: u64) {
     assert!(output.status.success());
 }

+/// Automatically assign a loopdev to path
+pub fn setup_loopdev_file(path: &str, sector_size: Option<u64>) -> String {
+    let log_sec = sector_size.unwrap_or(512);
+
+    let output = Command::new("losetup")
+        .args(["-f", "--show", "-b", &format!("{log_sec}"), path])
+        .output()
+        .expect("failed exec losetup");
+    assert!(output.status.success());
+    // return the assigned loop device
+    String::from_utf8(output.stdout).unwrap().trim().to_string()
+}
+
+/// Detach the provided loop device.
+pub fn detach_loopdev(dev: &str) {
+    let output = Command::new("losetup")
+        .args(["-d", dev])
+        .output()
+        .expect("failed exec losetup");
+    assert!(output.status.success());
+}
+
 pub fn fscheck(device: &str) {
     let output = Command::new("fsck")
         .args([device, "-n"])
io-engine/src/bdev/aio.rs (+13 -3)

@@ -3,6 +3,7 @@ use std::{
     convert::TryFrom,
     ffi::CString,
     fmt::{Debug, Formatter},
+    os::unix::fs::FileTypeExt,
 };

 use async_trait::async_trait;
@@ -29,7 +30,7 @@ pub(super) struct Aio {

 impl Debug for Aio {
     fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
-        write!(f, "Aio '{}'", self.name)
+        write!(f, "Aio '{}', 'blk_size: {}'", self.name, self.blk_size)
     }
 }

@@ -47,6 +48,10 @@ impl TryFrom<&Url> for Aio {
             });
         }

+        let path_is_blockdev = std::fs::metadata(url.path())
+            .ok()
+            .map_or(false, |meta| meta.file_type().is_block_device());
+
         let mut parameters: HashMap<String, String> =
             url.query_pairs().into_owned().collect();

@@ -58,9 +63,14 @@ impl TryFrom<&Url> for Aio {
                     value: value.clone(),
                 })?
             }
-            None => 512,
+            None => {
+                if path_is_blockdev {
+                    0
+                } else {
+                    512
+                }
+            }
         };
-
         let uuid = uri::uuid(parameters.remove("uuid")).context(
             bdev_api::UuidParamParseFailed {
                 uri: url.to_string(),
io-engine/src/bdev/uring.rs (+17 -2)

@@ -1,4 +1,9 @@
-use std::{collections::HashMap, convert::TryFrom, ffi::CString};
+use std::{
+    collections::HashMap,
+    convert::TryFrom,
+    ffi::CString,
+    os::unix::fs::FileTypeExt,
+};

 use async_trait::async_trait;
 use futures::channel::oneshot;
@@ -36,6 +41,10 @@ impl TryFrom<&Url> for Uring {
             });
         }

+        let path_is_blockdev = std::fs::metadata(url.path())
+            .ok()
+            .map_or(false, |meta| meta.file_type().is_block_device());
+
         let mut parameters: HashMap<String, String> =
             url.query_pairs().into_owned().collect();

@@ -47,7 +56,13 @@ impl TryFrom<&Url> for Uring {
                     value: value.clone(),
                 })?
             }
-            None => 512,
+            None => {
+                if path_is_blockdev {
+                    0
+                } else {
+                    512
+                }
+            }
         };

         let uuid = uri::uuid(parameters.remove("uuid")).context(
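
With this change the uring parser behaves the same as aio: a URI such as uring:///dev/loop0 (device path illustrative) with no blk_size parameter now resolves to 0 on a block device, so SPDK probes the sector size instead of assuming 512.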

io-engine/tests/lvs_pool.rs (+124 -23)

@@ -16,14 +16,33 @@ use std::pin::Pin;

 pub mod common;

-static DISKNAME1: &str = "/tmp/disk1.img";
-static DISKNAME2: &str = "/tmp/disk2.img";
+static TESTDIR: &str = "/tmp/io-engine-tests";
+static DISKNAME1: &str = "/tmp/io-engine-tests/disk1.img";
+static DISKNAME2: &str = "/tmp/io-engine-tests/disk2.img";
+static DISKNAME3: &str = "/tmp/io-engine-tests/disk3.img";

 #[tokio::test]
 async fn lvs_pool_test() {
-    common::delete_file(&[DISKNAME1.into(), DISKNAME2.into()]);
+    // Create directory for placing test disk files
+    // todo: Create this from some common place and use for all other tests too.
+    let _ = std::process::Command::new("mkdir")
+        .args(["-p"])
+        .args([TESTDIR])
+        .output()
+        .expect("failed to execute mkdir");
+
+    common::delete_file(&[
+        DISKNAME1.into(),
+        DISKNAME2.into(),
+        DISKNAME3.into(),
+    ]);
     common::truncate_file(DISKNAME1, 128 * 1024);
     common::truncate_file(DISKNAME2, 128 * 1024);
+    common::truncate_file(DISKNAME3, 128 * 1024);
+
+    //setup disk3 via loop device using a sector size of 4096.
+    let ldev = common::setup_loopdev_file(DISKNAME3, Some(4096));
+
     let args = MayastorCliArgs {
         reactor_mask: "0x3".into(),
         ..Default::default()
@@ -32,15 +51,17 @@ async fn lvs_pool_test() {

     // should fail to import a pool that does not exist on disk
     ms.spawn(async {
-        assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err())
+        assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
+            .await
+            .is_err())
     })
     .await;

     // should succeed to create a pool we can not import
     ms.spawn(async {
         Lvs::create_or_import(PoolArgs {
             name: "tpool".into(),
-            disks: vec!["aio:///tmp/disk1.img".into()],
+            disks: vec![format!("aio://{DISKNAME1}")],
             uuid: None,
             cluster_size: None,
             backend: PoolBackend::Lvs,
@@ -55,16 +76,23 @@ async fn lvs_pool_test() {
     // have an idempotent snafu, we dont crash and
     // burn
     ms.spawn(async {
-        assert!(Lvs::create("tpool", "aio:///tmp/disk1.img", None, None)
-            .await
-            .is_err())
+        assert!(Lvs::create(
+            "tpool",
+            format!("aio://{DISKNAME1}").as_str(),
+            None,
+            None
+        )
+        .await
+        .is_err())
     })
     .await;

     // should fail to import the pool that is already imported
     // similar to above, we use the import directly
     ms.spawn(async {
-        assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err())
+        assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
+            .await
+            .is_err())
     })
     .await;

@@ -75,7 +103,7 @@ async fn lvs_pool_test() {
         assert_eq!(pool.name(), "tpool");
         assert_eq!(pool.used(), 0);
         dbg!(pool.uuid());
-        assert_eq!(pool.base_bdev().name(), "/tmp/disk1.img");
+        assert_eq!(pool.base_bdev().name(), DISKNAME1);
     })
     .await;

@@ -90,9 +118,13 @@ async fn lvs_pool_test() {
     // import and export implicitly destroy the base_bdev, for
     // testing import and create we
     // sometimes create the base_bdev manually
-    bdev_create("aio:///tmp/disk1.img").await.unwrap();
+    bdev_create(format!("aio://{DISKNAME1}").as_str())
+        .await
+        .unwrap();

-    assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_ok());
+    assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
+        .await
+        .is_ok());

     let pool = Lvs::lookup("tpool").unwrap();
     assert_eq!(pool.uuid(), uuid);
@@ -107,13 +139,22 @@ async fn lvs_pool_test() {
     let uuid = pool.uuid();
     pool.destroy().await.unwrap();

-    bdev_create("aio:///tmp/disk1.img").await.unwrap();
-    assert!(Lvs::import("tpool", "aio:///tmp/disk1.img").await.is_err());
+    bdev_create(format!("aio://{DISKNAME1}").as_str())
+        .await
+        .unwrap();
+    assert!(Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
+        .await
+        .is_err());

     assert_eq!(Lvs::iter().count(), 0);
-    assert!(Lvs::create("tpool", "aio:///tmp/disk1.img", None, None)
-        .await
-        .is_ok());
+    assert!(Lvs::create(
+        "tpool",
+        format!("aio://{DISKNAME1}").as_str(),
+        None,
+        None
+    )
+    .await
+    .is_ok());

     let pool = Lvs::lookup("tpool").unwrap();
     assert_ne!(uuid, pool.uuid());
@@ -181,7 +222,7 @@ async fn lvs_pool_test() {
     pool.export().await.unwrap();
     let pool = Lvs::create_or_import(PoolArgs {
         name: "tpool".to_string(),
-        disks: vec!["aio:///tmp/disk1.img".to_string()],
+        disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
         backend: PoolBackend::Lvs,
@@ -297,8 +338,12 @@ async fn lvs_pool_test() {
     // import the pool all shares should be there, but also validate
     // the share that not shared to be -- not shared
     ms.spawn(async {
-        bdev_create("aio:///tmp/disk1.img").await.unwrap();
-        let pool = Lvs::import("tpool", "aio:///tmp/disk1.img").await.unwrap();
+        bdev_create(format!("aio://{DISKNAME1}").as_str())
+            .await
+            .unwrap();
+        let pool = Lvs::import("tpool", format!("aio://{DISKNAME1}").as_str())
+            .await
+            .unwrap();

         for l in pool.lvols().unwrap() {
             if l.name() == "notshared" {
@@ -321,7 +366,7 @@ async fn lvs_pool_test() {

     let pool = Lvs::create_or_import(PoolArgs {
         name: "tpool".into(),
-        disks: vec!["aio:///tmp/disk1.img".into()],
+        disks: vec![format!("aio://{DISKNAME1}")],
         uuid: None,
         cluster_size: None,
         backend: PoolBackend::Lvs,
@@ -336,6 +381,60 @@ async fn lvs_pool_test() {
     })
     .await;

+    let pool_dev_aio = ldev.clone();
+    // should succeed to create an aio bdev pool on a loop blockdev of 4096
+    // bytes sector size.
+    ms.spawn(async move {
+        Lvs::create_or_import(PoolArgs {
+            name: "tpool_4k_aio".into(),
+            disks: vec![format!("aio://{pool_dev_aio}")],
+            uuid: None,
+            cluster_size: None,
+            backend: PoolBackend::Lvs,
+        })
+        .await
+        .unwrap();
+    })
+    .await;
+
+    // should be able to find our new LVS created on loopdev, and subsequently
+    // destroy it.
+    ms.spawn(async {
+        let pool = Lvs::lookup("tpool_4k_aio").unwrap();
+        assert_eq!(pool.name(), "tpool_4k_aio");
+        assert_eq!(pool.used(), 0);
+        dbg!(pool.uuid());
+        pool.destroy().await.unwrap();
+    })
+    .await;
+
+    let pool_dev_uring = ldev.clone();
+    // should succeed to create an uring pool on a loop blockdev of 4096 bytes
+    // sector size.
+    ms.spawn(async move {
+        Lvs::create_or_import(PoolArgs {
+            name: "tpool_4k_uring".into(),
+            disks: vec![format!("uring://{pool_dev_uring}")],
+            uuid: None,
+            cluster_size: None,
+            backend: PoolBackend::Lvs,
+        })
+        .await
+        .unwrap();
+    })
+    .await;
+
+    // should be able to find our new LVS created on loopdev, and subsequently
+    // destroy it.
+    ms.spawn(async {
+        let pool = Lvs::lookup("tpool_4k_uring").unwrap();
+        assert_eq!(pool.name(), "tpool_4k_uring");
+        assert_eq!(pool.used(), 0);
+        dbg!(pool.uuid());
+        pool.destroy().await.unwrap();
+    })
+    .await;
+
     // validate the expected state of mayastor
     ms.spawn(async {
         // no shares left except for the discovery controller
@@ -352,7 +451,7 @@ async fn lvs_pool_test() {
         // importing a pool with the wrong name should fail
         Lvs::create_or_import(PoolArgs {
             name: "jpool".into(),
-            disks: vec!["aio:///tmp/disk1.img".into()],
+            disks: vec![format!("aio://{DISKNAME1}")],
             uuid: None,
             cluster_size: None,
             backend: PoolBackend::Lvs,
@@ -369,7 +468,7 @@ async fn lvs_pool_test() {
     ms.spawn(async {
         let pool = Lvs::create_or_import(PoolArgs {
             name: "tpool2".into(),
-            disks: vec!["/tmp/disk2.img".into()],
+            disks: vec![format!("aio://{DISKNAME2}")],
             uuid: None,
             cluster_size: None,
             backend: PoolBackend::Lvs,
@@ -381,4 +480,6 @@ async fn lvs_pool_test() {
     .await;

     common::delete_file(&[DISKNAME2.into()]);
+    common::detach_loopdev(ldev.as_str());
+    common::delete_file(&[DISKNAME3.into()]);
 }
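
To double-check that the attached loop device really exposes 4096-byte logical sectors before the pool test runs, one could query it with util-linux blockdev; a hedged sketch (logical_sector_size is a hypothetical helper, not part of this change):

    use std::process::Command;

    /// Hypothetical helper: read a device's logical sector size via
    /// `blockdev --getss`.
    fn logical_sector_size(dev: &str) -> u64 {
        let out = Command::new("blockdev")
            .args(["--getss", dev])
            .output()
            .expect("failed exec blockdev");
        assert!(out.status.success());
        String::from_utf8(out.stdout).unwrap().trim().parse().unwrap()
    }

    // e.g. assert_eq!(logical_sector_size(&ldev), 4096);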

scripts/clean-cargo-tests.sh (+19)

@@ -1,8 +1,27 @@
+#!/usr/bin/env bash
+
 SCRIPT_DIR="$(dirname "$0")"
 ROOT_DIR=$(realpath "$SCRIPT_DIR/..")

 sudo nvme disconnect-all

+# Detach any loop devices created for test purposes
+for back_file in "/tmp/io-engine-tests"/*; do
+    # Find loop devices associated with the disk image
+    devices=$(losetup -j "$back_file" -O NAME --noheadings)
+
+    # Detach each loop device found
+    while IFS= read -r device; do
+        if [ -n "$device" ]; then
+            echo "Detaching loop device: $device"
+            losetup -d "$device"
+        fi
+    done <<< "$devices"
+done
+# Delete the directory too
+rmdir --ignore-fail-on-non-empty "/tmp/io-engine-tests"
+
 for c in $(docker ps -a --filter "label=io.composer.test.name" --format '{{.ID}}') ; do
     docker kill "$c"
     docker rm "$c"
