@@ -1237,28 +1237,36 @@ metaslab_unload(metaslab_t *msp)
 	msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
 }
 
-metaslab_t *
-metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
+int
+metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
+    metaslab_t **msp)
 {
 	vdev_t *vd = mg->mg_vd;
 	objset_t *mos = vd->vdev_spa->spa_meta_objset;
-	metaslab_t *msp;
+	metaslab_t *ms;
+	int error;
 
-	msp = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
-	mutex_init(&msp->ms_lock, NULL, MUTEX_DEFAULT, NULL);
-	cv_init(&msp->ms_load_cv, NULL, CV_DEFAULT, NULL);
-	msp->ms_id = id;
-	msp->ms_start = id << vd->vdev_ms_shift;
-	msp->ms_size = 1ULL << vd->vdev_ms_shift;
+	ms = kmem_zalloc(sizeof (metaslab_t), KM_PUSHPAGE);
+	mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
+	cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
+	ms->ms_id = id;
+	ms->ms_start = id << vd->vdev_ms_shift;
+	ms->ms_size = 1ULL << vd->vdev_ms_shift;
 
 	/*
 	 * We only open space map objects that already exist. All others
 	 * will be opened when we finally allocate an object for it.
 	 */
 	if (object != 0) {
-		VERIFY0(space_map_open(&msp->ms_sm, mos, object, msp->ms_start,
-		    msp->ms_size, vd->vdev_ashift, &msp->ms_lock));
-		ASSERT(msp->ms_sm != NULL);
+		error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
+		    ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
+
+		if (error != 0) {
+			kmem_free(ms, sizeof (metaslab_t));
+			return (error);
+		}
+
+		ASSERT(ms->ms_sm != NULL);
 	}
 
 	/*
@@ -1268,11 +1276,11 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
 	 * addition of new space; and for debugging, it ensures that we'd
 	 * data fault on any attempt to use this metaslab before it's ready.
 	 */
-	msp->ms_tree = range_tree_create(&metaslab_rt_ops, msp, &msp->ms_lock);
-	metaslab_group_add(mg, msp);
+	ms->ms_tree = range_tree_create(&metaslab_rt_ops, ms, &ms->ms_lock);
+	metaslab_group_add(mg, ms);
 
-	msp->ms_fragmentation = metaslab_fragmentation(msp);
-	msp->ms_ops = mg->mg_class->mc_ops;
+	ms->ms_fragmentation = metaslab_fragmentation(ms);
+	ms->ms_ops = mg->mg_class->mc_ops;
 
 	/*
 	 * If we're opening an existing pool (txg == 0) or creating
@@ -1281,25 +1289,27 @@ metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg)
 	 * does not become available until after this txg has synced.
 	 */
 	if (txg <= TXG_INITIAL)
-		metaslab_sync_done(msp, 0);
+		metaslab_sync_done(ms, 0);
 
 	/*
 	 * If metaslab_debug_load is set and we're initializing a metaslab
 	 * that has an allocated space_map object then load the its space
 	 * map so that can verify frees.
 	 */
-	if (metaslab_debug_load && msp->ms_sm != NULL) {
-		mutex_enter(&msp->ms_lock);
-		VERIFY0(metaslab_load(msp));
-		mutex_exit(&msp->ms_lock);
+	if (metaslab_debug_load && ms->ms_sm != NULL) {
+		mutex_enter(&ms->ms_lock);
+		VERIFY0(metaslab_load(ms));
+		mutex_exit(&ms->ms_lock);
 	}
 
 	if (txg != 0) {
 		vdev_dirty(vd, 0, NULL, txg);
-		vdev_dirty(vd, VDD_METASLAB, msp, txg);
+		vdev_dirty(vd, VDD_METASLAB, ms, txg);
 	}
 
-	return (msp);
+	*msp = ms;
+
+	return (0);
 }
 
 void
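
With this change, metaslab_init() no longer hands back a metaslab pointer directly: it returns 0 or an errno, and on success the new metaslab comes out through the added **msp out-parameter. The fragment below is a minimal sketch of caller-side usage; the surrounding variables (vd, m, object, txg) and the idea of propagating the error out of something like vdev_metaslab_init() are assumptions for illustration, not part of this hunk.

	metaslab_t *ms = NULL;
	int error;

	/*
	 * metaslab_init() now reports failure (e.g. from space_map_open())
	 * instead of VERIFY-ing it; the metaslab is returned via &ms.
	 */
	error = metaslab_init(vd->vdev_mg, m, object, txg, &ms);
	if (error != 0)
		return (error);		/* propagate to the caller */

	ASSERT(ms != NULL);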