Skip to content
Permalink
Browse files

3742 zfs comments need cleaner, more consistent style

Reviewed by: Matthew Ahrens <mahrens@delphix.com>
Reviewed by: George Wilson <george.wilson@delphix.com>
Reviewed by: Eric Schrock <eric.schrock@delphix.com>
Approved by: Christopher Siden <christopher.siden@delphix.com>
  • Loading branch information...
wca authored and Christopher Siden committed Jun 11, 2013
1 parent 3e30c24 commit f7170741490edba9d1d9c697c177c887172bc741
Showing with 242 additions and 269 deletions.
  1. +6 −8 usr/src/uts/common/fs/zfs/arc.c
  2. +1 −1 usr/src/uts/common/fs/zfs/bptree.c
  3. +10 −8 usr/src/uts/common/fs/zfs/dnode.c
  4. +2 −2 usr/src/uts/common/fs/zfs/dnode_sync.c
  5. +1 −1 usr/src/uts/common/fs/zfs/dsl_prop.c
  6. +1 −2 usr/src/uts/common/fs/zfs/sa.c
  7. +5 −6 usr/src/uts/common/fs/zfs/spa.c
  8. +1 −0 usr/src/uts/common/fs/zfs/spa_config.c
  9. +1 −1 usr/src/uts/common/fs/zfs/spa_misc.c
  10. +7 −8 usr/src/uts/common/fs/zfs/sys/ddt.h
  11. +2 −3 usr/src/uts/common/fs/zfs/sys/dnode.h
  12. +1 −0 usr/src/uts/common/fs/zfs/sys/dsl_pool.h
  13. +18 −19 usr/src/uts/common/fs/zfs/sys/sa_impl.h
  14. +1 −1 usr/src/uts/common/fs/zfs/sys/spa_impl.h
  15. +0 −1 usr/src/uts/common/fs/zfs/sys/space_map.h
  16. +1 −3 usr/src/uts/common/fs/zfs/sys/unique.h
  17. +6 −4 usr/src/uts/common/fs/zfs/sys/vdev_impl.h
  18. +20 −13 usr/src/uts/common/fs/zfs/sys/zap.h
  19. +6 −9 usr/src/uts/common/fs/zfs/sys/zap_leaf.h
  20. +3 −2 usr/src/uts/common/fs/zfs/sys/zfs_acl.h
  21. +7 −10 usr/src/uts/common/fs/zfs/sys/zfs_rlock.h
  22. +7 −9 usr/src/uts/common/fs/zfs/sys/zfs_znode.h
  23. +6 −7 usr/src/uts/common/fs/zfs/sys/zil.h
  24. +2 −3 usr/src/uts/common/fs/zfs/sys/zio_compress.h
  25. +3 −3 usr/src/uts/common/fs/zfs/txg.c
  26. +11 −7 usr/src/uts/common/fs/zfs/vdev.c
  27. +6 −5 usr/src/uts/common/fs/zfs/vdev_queue.c
  28. +4 −5 usr/src/uts/common/fs/zfs/vdev_raidz.c
  29. +5 −4 usr/src/uts/common/fs/zfs/zfs_acl.c
  30. +1 −0 usr/src/uts/common/fs/zfs/zfs_ctldir.c
  31. +4 −8 usr/src/uts/common/fs/zfs/zfs_ioctl.c
  32. +10 −11 usr/src/uts/common/fs/zfs/zfs_log.c
  33. +1 −1 usr/src/uts/common/fs/zfs/zfs_rlock.c
  34. +1 −1 usr/src/uts/common/fs/zfs/zfs_sa.c
  35. +10 −14 usr/src/uts/common/fs/zfs/zfs_vfsops.c
  36. +52 −69 usr/src/uts/common/fs/zfs/zfs_vnops.c
  37. +6 −11 usr/src/uts/common/fs/zfs/zfs_znode.c
  38. +3 −2 usr/src/uts/common/fs/zfs/zil.c
  39. +10 −7 usr/src/uts/common/fs/zfs/zio.c
@@ -58,11 +58,11 @@
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
@@ -77,7 +77,7 @@
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal arc algorithms for
* adjusting the cache use method 2. We therefore provide two
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* arc list locks.
*
@@ -385,7 +385,7 @@ static arc_stats_t arc_stats = {
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)

#define ARCSTAT_INCR(stat, val) \
atomic_add_64(&arc_stats.stat.value.ui64, (val));
atomic_add_64(&arc_stats.stat.value.ui64, (val))

#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
@@ -613,9 +613,7 @@ uint64_t zfs_crc64_table[256];
#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)

/*
* L2ARC Performance Tunables
*/
/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
@@ -3628,7 +3626,7 @@ arc_tempreserve_space(uint64_t reserve, uint64_t txg)

/*
* Writes will, almost always, require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
if (error = arc_memory_throttle(reserve, anon_size, txg))
@@ -43,7 +43,7 @@
* dsl_scan_sync. This allows the delete operation to finish without traversing
* all the dataset's blocks.
*
* Note that while bt_begin and bt_end are only ever incremented in this code,
* Note that while bt_begin and bt_end are only ever incremented in this code,
* they are effectively reset to 0 every time the entire bptree is freed because
* the bptree's object is destroyed and re-created.
*/
@@ -1803,14 +1803,16 @@ dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx)
}

/*
* This function scans a block at the indicated "level" looking for
* a hole or data (depending on 'flags'). If level > 0, then we are
* scanning an indirect block looking at its pointers. If level == 0,
* then we are looking at a block of dnodes. If we don't find what we
* are looking for in the block, we return ESRCH. Otherwise, return
* with *offset pointing to the beginning (if searching forwards) or
* end (if searching backwards) of the range covered by the block
* pointer we matched on (or dnode).
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
@@ -302,7 +302,7 @@ free_children(dmu_buf_impl_t *db, uint64_t blkid, uint64_t nblks, int trunc,
}

/*
* free_range: Traverse the indicated range of the provided file
* Traverse the indicated range of the provided file
* and "free" all the blocks contained there.
*/
static void
@@ -370,7 +370,7 @@ dnode_sync_free_range(dnode_t *dn, uint64_t blkid, uint64_t nblks, dmu_tx_t *tx)
}

/*
* Try to kick all the dnode's dbufs out of the cache...
* Try to kick all the dnode's dbufs out of the cache...
*/
void
dnode_evict_dbufs(dnode_t *dn)
@@ -380,7 +380,7 @@ dsl_prop_predict(dsl_dir_t *dd, const char *propname,

/*
* Unregister this callback. Return 0 on success, ENOENT if ddname is
* invalid, or ENOMSG if no matching callback registered.
* invalid, or ENOMSG if no matching callback registered.
*/
int
dsl_prop_unregister(dsl_dataset_t *ds, const char *propname,
@@ -111,6 +111,7 @@
* location.
*
* Byteswap implications:
*
* Since the SA attributes are not entirely self describing we can't do
* the normal byteswap processing. The special ZAP layout attribute and
* attribute registration attributes define the byteswap function and the
@@ -189,7 +190,6 @@ sa_attr_reg_t sa_legacy_attrs[] = {
};

/*
* ZPL legacy layout
* This is only used for objects of type DMU_OT_ZNODE
*/
sa_attr_type_t sa_legacy_zpl_layout[] = {
@@ -199,7 +199,6 @@ sa_attr_type_t sa_legacy_zpl_layout[] = {
/*
* Special dummy layout used for buffers with no attributes.
*/

sa_attr_type_t sa_dummy_zpl_layout[] = { 0 };

static int sa_legacy_attr_count = 16;
@@ -4516,6 +4516,7 @@ spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)

/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
@@ -5170,11 +5171,9 @@ spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
* the spa_vdev_config_[enter/exit] functions which allow us to
* grab and release the spa_config_lock while still holding the namespace
* lock. During each step the configuration is synced out.
*/

/*
* Remove a device from the pool. Currently, this supports removing only hot
* spares, slogs, and level 2 ARC devices.
*
* Currently, this supports removing only hot spares, slogs, and level 2 ARC
* devices.
*/
int
spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
@@ -5284,7 +5283,7 @@ spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)

/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
@@ -317,6 +317,7 @@ spa_config_set(spa_t *spa, nvlist_t *config)

/*
* Generate the pool's configuration based on the current in-core state.
*
* We infer whether to generate a complete config or just one top-level config
* based on whether vd is the root vdev.
*/
@@ -1334,7 +1334,7 @@ zfs_panic_recover(const char *fmt, ...)

/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
strtonum(const char *str, char **nptr)
@@ -63,16 +63,15 @@ enum ddt_class {
*/
typedef struct ddt_key {
zio_cksum_t ddk_cksum; /* 256-bit block checksum */
uint64_t ddk_prop; /* LSIZE, PSIZE, compression */
/*
* Encoded with logical & physical size, and compression, as follows:
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | 0 | 0 | 0 | comp | PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
uint64_t ddk_prop;
} ddt_key_t;

/*
* ddk_prop layout:
*
* +-------+-------+-------+-------+-------+-------+-------+-------+
* | 0 | 0 | 0 | comp | PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*/
#define DDK_GET_LSIZE(ddk) \
BF64_GET_SB((ddk)->ddk_prop, 0, 16, SPA_MINBLOCKSHIFT, 1)
#define DDK_SET_LSIZE(ddk, x) \
@@ -145,9 +145,8 @@ typedef struct dnode_phys {

typedef struct dnode {
/*
* dn_struct_rwlock protects the structure of the dnode,
* including the number of levels of indirection (dn_nlevels),
* dn_maxblkid, and dn_next_*
* Protects the structure of the dnode, including the number of levels
* of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
*/
krwlock_t dn_struct_rwlock;

@@ -110,6 +110,7 @@ typedef struct dsl_pool {

/*
* Protects administrative changes (properties, namespace)
*
* It is only held for write in syncing context. Therefore
* syncing context does not need to ever have it for read, since
* nobody else could possibly have it for write.
@@ -150,6 +150,7 @@ struct sa_os {

/*
* header for all bonus and spill buffers.
*
* The header has a fixed portion with a variable number
* of "lengths" depending on the number of variable sized
* attributes which are determined by the "layout number"
@@ -158,29 +159,27 @@ struct sa_os {
#define SA_MAGIC 0x2F505A /* ZFS SA */
typedef struct sa_hdr_phys {
uint32_t sa_magic;
uint16_t sa_layout_info; /* Encoded with hdrsize and layout number */
/*
* Encoded with hdrsize and layout number as follows:
* 16 10 0
* +--------+-------+
* | hdrsz |layout |
* +--------+-------+
*
* Bits 0-10 are the layout number
* Bits 11-16 are the size of the header.
* The hdrsize is the number * 8
*
* For example.
* hdrsz of 1 ==> 8 byte header
* 2 ==> 16 byte header
*
*/
uint16_t sa_layout_info;
uint16_t sa_lengths[1]; /* optional sizes for variable length attrs */
/* ... Data follows the lengths. */
} sa_hdr_phys_t;

/*
* sa_hdr_phys -> sa_layout_info
*
* 16 10 0
* +--------+-------+
* | hdrsz |layout |
* +--------+-------+
*
* Bits 0-10 are the layout number
* Bits 11-16 are the size of the header.
* The hdrsize is the number * 8
*
* For example.
* hdrsz of 1 ==> 8 byte header
* 2 ==> 16 byte header
*
*/

#define SA_HDR_LAYOUT_NUM(hdr) BF32_GET(hdr->sa_layout_info, 0, 10)
#define SA_HDR_SIZE(hdr) BF32_GET_SB(hdr->sa_layout_info, 10, 6, 3, 0)
#define SA_HDR_LAYOUT_INFO_ENCODE(x, num, size) \
@@ -239,7 +239,7 @@ struct spa {
kmutex_t spa_iokstat_lock; /* protects spa_iokstat_* */
struct kstat *spa_iokstat; /* kstat of io to this pool */
/*
* spa_refcount & spa_config_lock must be the last elements
* spa_refcount & spa_config_lock must be the last elements
* because refcount_t changes size based on compilation options.
* In order for the MDB module to function correctly, the other
* fields must remain in the same location.
@@ -94,7 +94,6 @@ struct space_map_ops {
* 63 62 60 59 50 49 0
*
*
*
* non-debug entry
*
* 1 47 1 15
@@ -26,8 +26,6 @@
#ifndef _SYS_UNIQUE_H
#define _SYS_UNIQUE_H

#pragma ident "%Z%%M% %I% %E% SMI"

#include <sys/zfs_context.h>

#ifdef __cplusplus
@@ -42,7 +40,7 @@ void unique_fini(void);

/*
* Return a new unique value (which will not be uniquified against until
* it is unique_insert()-ed).
* it is unique_insert()-ed).
*/
uint64_t unique_create(void);

@@ -245,12 +245,13 @@ typedef struct vdev_label {
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02

/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size and offset of embedded boot loader region on each label.
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
*/
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */

/*
* Size of label regions at the start and end of each leaf device.
@@ -317,8 +318,9 @@ extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);

/*
* zdb uses this tunable, so it must be declared here to make lint happy.
* Global variables
*/
/* zdb uses this tunable, so it must be declared here to make lint happy. */
extern int zfs_vdev_cache_size;

/*

0 comments on commit f717074

Please sign in to comment.
You can’t perform that action at this time.