Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with HTTPS or Subversion.

Download ZIP
Browse files

update to xnu-1504.9.17

  • Loading branch information...
commit 10a2cfc470cb35efd1da28979138f8f171e25c2a 1 parent 5a50200
@comex authored
Showing with 4,339 additions and 1,445 deletions.
  1. +11 −3 bsd/dev/unix_startup.c
  2. +2 −0  bsd/hfs/hfs.h
  3. +5 −4 bsd/hfs/hfs_catalog.c
  4. +73 −38 bsd/hfs/hfs_vfsops.c
  5. +25 −7 bsd/hfs/hfs_vfsutils.c
  6. +7 −0 bsd/hfs/hfs_vnops.c
  7. +3 −3 bsd/hfs/hfs_xattr.c
  8. +9 −9 bsd/hfs/hfscommon/Misc/FileExtentMapping.c
  9. +67 −29 bsd/hfs/hfscommon/Misc/VolumeAllocation.c
  10. +14 −3 bsd/hfs/hfscommon/headers/FileMgrInternal.h
  11. +79 −8 bsd/kern/kern_credential.c
  12. +1 −1  bsd/kern/kern_descrip.c
  13. +2 −0  bsd/kern/kern_fork.c
  14. +131 −128 bsd/kern/kern_malloc.c
  15. +12 −1 bsd/kern/kern_symfile.c
  16. +1 −1  bsd/kern/kpi_mbuf.c
  17. +2 −0  bsd/kern/tty.c
  18. +0 −2  bsd/kern/tty_ptmx.c
  19. +2 −0  bsd/kern/ubc_subr.c
  20. +1 −0  bsd/kern/uipc_socket.c
  21. +6 −1 bsd/net/raw_usrreq.c
  22. +1 −0  bsd/net/route.c
  23. +9 −4 bsd/netinet/in_pcb.c
  24. +14 −11 bsd/netinet/ip_input.c
  25. +1 −1  bsd/netinet/ip_mroute.c
  26. +114 −1 bsd/netinet/ip_output.c
  27. +1 −1  bsd/netinet6/in6_pcb.c
  28. +1 −11 bsd/netinet6/in6_proto.c
  29. +11 −5 bsd/netinet6/ip6_input.c
  30. +1 −1  bsd/netinet6/ip6_var.h
  31. +4 −3 bsd/netinet6/nd6.c
  32. +3 −2 bsd/nfs/nfs_socket.c
  33. +1 −0  bsd/sys/disk.h
  34. +1 −0  bsd/sys/kdebug.h
  35. +1 −0  bsd/sys/mount_internal.h
  36. +7 −3 bsd/vfs/vfs_bio.c
  37. +7 −10 bsd/vfs/vfs_cluster.c
  38. +203 −26 bsd/vfs/vfs_subr.c
  39. +3 −3 bsd/vfs/vfs_xattr.c
  40. +27 −6 bsd/vm/dp_backing_file.c
  41. +4 −0 config/IOKit.exports
  42. +1 −1  config/MasterVersion
  43. +1 −0  iokit/IOKit/IOBufferMemoryDescriptor.h
  44. +37 −10 iokit/IOKit/IOHibernatePrivate.h
  45. +4 −0 iokit/IOKit/IOMemoryDescriptor.h
  46. +7 −0 iokit/IOKit/IOMessage.h
  47. +99 −1 iokit/IOKit/pwr_mgt/IOPM.h
  48. +2 −1  iokit/IOKit/pwr_mgt/IOPMLibDefs.h
  49. +27 −1 iokit/IOKit/pwr_mgt/IOPMPrivate.h
  50. +132 −7 iokit/IOKit/pwr_mgt/RootDomain.h
  51. +45 −140 iokit/Kernel/IOBufferMemoryDescriptor.cpp
  52. +142 −111 iokit/Kernel/IODMACommand.cpp
  53. +281 −182 iokit/Kernel/IOHibernateIO.cpp
  54. +3 −1 iokit/Kernel/IOHibernateInternal.h
  55. +4 −33 iokit/Kernel/IOHibernateRestoreKernel.c
  56. +11 −9 iokit/Kernel/IOKitKernelInternal.h
  57. +22 −8 iokit/Kernel/IOLib.cpp
  58. +0 −21 iokit/Kernel/IOMapper.cpp
  59. +44 −16 iokit/Kernel/IOMemoryDescriptor.cpp
  60. +4 −4 iokit/Kernel/IOPMPowerStateQueue.cpp
  61. +4 −3 iokit/Kernel/IOPMPowerStateQueue.h
  62. +997 −28 iokit/Kernel/IOPMrootDomain.cpp
  63. +2 −2 iokit/Kernel/IOServicePM.cpp
  64. +21 −0 iokit/Kernel/RootDomainUserClient.cpp
  65. +3 −0  iokit/Kernel/RootDomainUserClient.h
  66. +0 −1  iokit/conf/files
  67. +109 −17 kgmacros
  68. +1 −1  osfmk/console/video_console.c
  69. +2 −1  osfmk/default_pager/default_pager.c
  70. +65 −32 osfmk/default_pager/dp_backing_store.c
  71. +6 −0 osfmk/device/device.defs
  72. +38 −4 osfmk/i386/acpi.c
  73. +5 −2 osfmk/i386/bsd_i386.c
  74. +0 −1  osfmk/i386/cpu_data.h
  75. +0 −1  osfmk/i386/cpuid.h
  76. +14 −0 osfmk/i386/hibernate_i386.c
  77. +5 −5 osfmk/i386/i386_init.c
  78. +78 −81 osfmk/i386/i386_vm_init.c
  79. +2 −2 osfmk/i386/machine_routines.c
  80. +0 −2  osfmk/i386/machine_routines.h
  81. +26 −1 osfmk/i386/machine_routines_asm.s
  82. +2 −1  osfmk/i386/misc_protos.h
  83. +19 −5 osfmk/i386/pcb.c
  84. +1 −0  osfmk/i386/pmCPU.c
  85. +2 −1  osfmk/i386/pmCPU.h
  86. +32 −17 osfmk/i386/pmap.c
  87. +1 −0  osfmk/i386/pmap_internal.h
  88. +53 −0 osfmk/i386/pmap_x86_common.c
  89. +20 −1 osfmk/i386/rtclock.c
  90. +5 −1 osfmk/i386/rtclock.h
  91. +5 −0 osfmk/ipc/ipc_init.c
  92. +6 −4 osfmk/kern/hibernate.c
  93. +2 −2 osfmk/kern/host.c
  94. +2 −0  osfmk/kern/mk_timer.c
  95. +71 −57 osfmk/kern/sched_prim.c
  96. +1 −0  osfmk/kern/sync_sema.c
  97. +1 −0  osfmk/kern/task.c
  98. +1 −0  osfmk/kern/thread.c
  99. +1 −0  osfmk/kern/thread_call.c
  100. +5 −0 osfmk/kern/wait_queue.c
  101. +22 −8 osfmk/kern/zalloc.c
  102. +3 −1 osfmk/kern/zalloc.h
  103. +2 −1  osfmk/mach/memory_object_types.h
  104. +3 −0  osfmk/ppc/hibernate_ppc.c
  105. +25 −0 osfmk/ppc/pmap.c
  106. +8 −3 osfmk/vm/bsd_vm.c
  107. +7 −3 osfmk/vm/memory_object.c
  108. +5 −3 osfmk/vm/pmap.h
  109. +1 −0  osfmk/vm/vm_apple_protect.c
  110. +12 −4 osfmk/vm/vm_fault.c
  111. +11 −20 osfmk/vm/vm_kern.c
  112. +1 −0  osfmk/vm/vm_kern.h
  113. +7 −0 osfmk/vm/vm_map.c
  114. +17 −11 osfmk/vm/vm_object.c
  115. +1 −0  osfmk/vm/vm_object.h
  116. +17 −7 osfmk/vm/vm_page.h
  117. +109 −69 osfmk/vm/vm_pageout.c
  118. +50 −5 osfmk/vm/vm_pageout.h
  119. +665 −154 osfmk/vm/vm_resident.c
  120. +6 −4 osfmk/vm/vm_user.c
  121. +18 −1 osfmk/x86_64/machine_routines_asm.s
  122. +3 −3 osfmk/x86_64/pmap.c
  123. +5 −3 osfmk/x86_64/start.s
View
14 bsd/dev/unix_startup.c
@@ -61,7 +61,7 @@ extern uint32_t tcp_recvspace;
void bsd_bufferinit(void) __attribute__((section("__TEXT, initcode")));
extern void md_prepare_for_shutdown(int, int, char *);
-unsigned int bsd_mbuf_cluster_reserve(void);
+unsigned int bsd_mbuf_cluster_reserve(boolean_t *);
void bsd_srv_setup(int);
void bsd_exec_setup(int);
@@ -159,7 +159,7 @@ bsd_startupearly(void)
#endif
int scale;
- nmbclusters = bsd_mbuf_cluster_reserve() / MCLBYTES;
+ nmbclusters = bsd_mbuf_cluster_reserve(NULL) / MCLBYTES;
#if INET || INET6
if ((scale = nmbclusters / NMBCLUSTERS) > 1) {
@@ -237,9 +237,10 @@ bsd_bufferinit(void)
* memory that is present.
*/
unsigned int
-bsd_mbuf_cluster_reserve(void)
+bsd_mbuf_cluster_reserve(boolean_t *overridden)
{
int mbuf_pool = 0;
+ static boolean_t was_overridden = FALSE;
/* If called more than once, return the previously calculated size */
if (mbuf_poolsz != 0)
@@ -263,6 +264,10 @@ bsd_mbuf_cluster_reserve(void)
ncl = (mbuf_pool << MBSHIFT) >> MCLSHIFT;
if (sane_size > (64 * 1024 * 1024) || ncl != 0) {
+
+ if (ncl || srv)
+ was_overridden = TRUE;
+
if ((nmbclusters = ncl) == 0) {
/* Auto-configure the mbuf pool size */
nmbclusters = mbuf_default_ncl(srv, sane_size);
@@ -278,6 +283,9 @@ bsd_mbuf_cluster_reserve(void)
}
mbuf_poolsz = nmbclusters << MCLSHIFT;
done:
+ if (overridden)
+ *overridden = was_overridden;
+
return (mbuf_poolsz);
}
#if defined(__LP64__)
View
2  bsd/hfs/hfs.h
@@ -716,6 +716,8 @@ extern u_int32_t hfs_freeblks(struct hfsmount * hfsmp, int wantreserve);
short MacToVFSError(OSErr err);
+void hfs_metadatazone_init(struct hfsmount *hfsmp);
+
/* HFS directory hint functions. */
extern directoryhint_t * hfs_getdirhint(struct cnode *, int, int);
extern void hfs_reldirhint(struct cnode *, directoryhint_t *);
View
9 bsd/hfs/hfs_catalog.c
@@ -2111,7 +2111,7 @@ cat_createlink(struct hfsmount *hfsmp, struct cat_desc *descp, struct cat_attr *
}
if (alias_allocated && rsrcforkp->extents[0].startBlock != 0) {
(void) BlockDeallocate(hfsmp, rsrcforkp->extents[0].startBlock,
- rsrcforkp->extents[0].blockCount);
+ rsrcforkp->extents[0].blockCount, 0);
rsrcforkp->extents[0].startBlock = 0;
rsrcforkp->extents[0].blockCount = 0;
}
@@ -2210,7 +2210,8 @@ cat_makealias(struct hfsmount *hfsmp, u_int32_t inode_num, struct HFSPlusCatalog
bzero(rsrcforkp, sizeof(HFSPlusForkData));
/* Allocate some disk space for the alias content. */
- result = BlockAllocate(hfsmp, 0, blkcount, blkcount, 1, 1,
+ result = BlockAllocate(hfsmp, 0, blkcount, blkcount,
+ HFS_ALLOC_FORCECONTIG | HFS_ALLOC_METAZONE,
&rsrcforkp->extents[0].startBlock,
&rsrcforkp->extents[0].blockCount);
if (result) {
@@ -2265,7 +2266,7 @@ cat_makealias(struct hfsmount *hfsmp, u_int32_t inode_num, struct HFSPlusCatalog
exit:
if (result && rsrcforkp->extents[0].startBlock != 0) {
- (void) BlockDeallocate(hfsmp, rsrcforkp->extents[0].startBlock, rsrcforkp->extents[0].blockCount);
+ (void) BlockDeallocate(hfsmp, rsrcforkp->extents[0].startBlock, rsrcforkp->extents[0].blockCount, 0);
rsrcforkp->extents[0].startBlock = 0;
rsrcforkp->extents[0].blockCount = 0;
rsrcforkp->logicalSize = 0;
@@ -2329,7 +2330,7 @@ cat_deletelink(struct hfsmount *hfsmp, struct cat_desc *descp)
(void) BlockDeallocate(hfsmp,
file.resourceFork.extents[i].startBlock,
- file.resourceFork.extents[i].blockCount);
+ file.resourceFork.extents[i].blockCount, 0);
totalBlocks -= file.resourceFork.extents[i].blockCount;
file.resourceFork.extents[i].startBlock = 0;
View
111 bsd/hfs/hfs_vfsops.c
@@ -368,7 +368,8 @@ hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t conte
/*
* Allow hot file clustering if conditions allow.
*/
- if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
+ if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
+ ((hfsmp->hfs_mp->mnt_kern_flag & MNTK_SSD) == 0)) {
(void) hfs_recording_init(hfsmp);
}
/* Force ACLs on HFS+ file systems. */
@@ -3762,9 +3763,10 @@ hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
}
}
- /*
- * TODO: Adjust the size of the metadata zone based on new volume size?
+ /*
+ * Update the metadata zone size based on current volume size
*/
+ hfs_metadatazone_init(hfsmp);
/*
* Adjust the size of hfsmp->hfs_attrdata_vp
@@ -3900,14 +3902,27 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
hfsmp->allocLimit = newblkcnt - 2;
else
hfsmp->allocLimit = newblkcnt - 1;
- /* Update the volume free block count to reflect the total number of
- * free blocks that will exist after a successful resize.
+ /*
+ * Update the volume free block count to reflect the total number
+ * of free blocks that will exist after a successful resize.
+ * Relocation of extents will result in no net change in the total
+ * free space on the disk. Therefore the code that allocates
+ * space for new extent and deallocates the old extent explicitly
+ * prevents updating the volume free block count. It will also
+ * prevent false disk full error when the number of blocks in
+ * an extent being relocated is more than the free blocks that
+ * will exist after the volume is resized.
*/
hfsmp->freeBlocks -= reclaimblks;
updateFreeBlocks = true;
HFS_MOUNT_UNLOCK(hfsmp, TRUE);
/*
+ * Update the metadata zone size, and, if required, disable it
+ */
+ hfs_metadatazone_init(hfsmp);
+
+ /*
* Look for files that have blocks at or beyond the location of the
* new alternate volume header
*/
@@ -4002,10 +4017,6 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
panic("hfs_truncatefs: unexpected error flushing volume header (%d)\n", error);
/*
- * TODO: Adjust the size of the metadata zone based on new volume size?
- */
-
- /*
* Adjust the size of hfsmp->hfs_attrdata_vp
*/
if (hfsmp->hfs_attrdata_vp) {
@@ -4034,6 +4045,10 @@ hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
hfsmp->nextAllocation = hfsmp->hfs_metazone_end + 1;
hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS;
HFS_MOUNT_UNLOCK(hfsmp, TRUE);
+ /* On error, reset the metadata zone for original volume size */
+ if (error && (updateFreeBlocks == true)) {
+ hfs_metadatazone_init(hfsmp);
+ }
if (lockflags) {
hfs_systemfile_unlock(hfsmp, lockflags);
@@ -4250,6 +4265,7 @@ hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int
struct BTreeIterator *iterator = NULL;
u_int8_t forktype;
u_int32_t fileID;
+ u_int32_t alloc_flags;
/* If there is no vnode for this file, then there's nothing to do. */
if (vp == NULL)
@@ -4343,25 +4359,32 @@ hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int
end_block = oldStartBlock + oldBlockCount;
/* Check if the file overlaps the target space */
if (end_block > startblk) {
- /* Allocate a new extent */
- error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, (is_sysfile ? true : false), &newStartBlock, &newBlockCount);
- if (error) {
- printf("hfs_reclaim_file: BlockAllocate (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
- goto fail;
+ alloc_flags = HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS;
+ if (is_sysfile) {
+ alloc_flags |= HFS_ALLOC_METAZONE;
}
- if (newBlockCount != oldBlockCount) {
- printf("hfs_reclaim_file: fileID=%u - newBlockCount=%u, oldBlockCount=%u", fileID, newBlockCount, oldBlockCount);
- if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount)) {
- hfs_mark_volume_inconsistent(hfsmp);
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ if (error) {
+ if (!is_sysfile && ((error == dskFulErr) || (error == ENOSPC))) {
+ /* Try allocating again using the metadata zone */
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ }
+ if (error) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
+ goto fail;
+ } else {
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) success for fileID=%u %u:(%u,%u)\n", fileID, i, newStartBlock, newBlockCount);
+ }
}
- goto fail;
}
/* Copy data from old location to new location */
error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, newBlockCount, context);
if (error) {
printf("hfs_reclaim_file: hfs_copy_extent error=%d for fileID=%u %u:(%u,%u) to %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount, i, newStartBlock, newBlockCount);
- if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount)) {
+ if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS)) {
hfs_mark_volume_inconsistent(hfsmp);
}
goto fail;
@@ -4371,7 +4394,7 @@ hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int
*blks_moved += newBlockCount;
/* Deallocate the old extent */
- error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount);
+ error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
if (error) {
printf("hfs_reclaim_file: BlockDeallocate returned %d\n", error);
hfs_mark_volume_inconsistent(hfsmp);
@@ -4445,22 +4468,30 @@ hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int
oldBlockCount = record[i].blockCount;
end_block = oldStartBlock + oldBlockCount;
if (end_block > startblk) {
- error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, (is_sysfile ? true : false), &newStartBlock, &newBlockCount);
- if (error) {
- printf("hfs_reclaim_file: BlockAllocate (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
- goto fail;
+ alloc_flags = HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS;
+ if (is_sysfile) {
+ alloc_flags |= HFS_ALLOC_METAZONE;
}
- if (newBlockCount != oldBlockCount) {
- printf("hfs_reclaim_file: fileID=%u - newBlockCount=%u, oldBlockCount=%u", fileID, newBlockCount, oldBlockCount);
- if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount)) {
- hfs_mark_volume_inconsistent(hfsmp);
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ if (error) {
+ if (!is_sysfile && ((error == dskFulErr) || (error == ENOSPC))) {
+ /* Try allocating again using the metadata zone */
+ alloc_flags |= HFS_ALLOC_METAZONE;
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, alloc_flags, &newStartBlock, &newBlockCount);
+ }
+ if (error) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) (error=%d) for fileID=%u %u:(%u,%u)\n", error, fileID, i, oldStartBlock, oldBlockCount);
+ goto fail;
+ } else {
+ if (hfs_resize_debug) {
+ printf("hfs_reclaim_file: BlockAllocate(metazone) success for fileID=%u %u:(%u,%u)\n", fileID, i, newStartBlock, newBlockCount);
+ }
}
- goto fail;
}
error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, newBlockCount, context);
if (error) {
printf("hfs_reclaim_file: hfs_copy_extent error=%d for fileID=%u (%u,%u) to (%u,%u)\n", error, fileID, oldStartBlock, oldBlockCount, newStartBlock, newBlockCount);
- if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount)) {
+ if (BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS)) {
hfs_mark_volume_inconsistent(hfsmp);
}
goto fail;
@@ -4483,7 +4514,7 @@ hfs_reclaim_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int
hfs_mark_volume_inconsistent(hfsmp);
goto fail;
}
- error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount);
+ error = BlockDeallocate(hfsmp, oldStartBlock, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
if (error) {
printf("hfs_reclaim_file: BlockDeallocate returned %d\n", error);
hfs_mark_volume_inconsistent(hfsmp);
@@ -4608,7 +4639,9 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize;
/* TODO: Allow the journal to change size based on the new volume size. */
- error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, true, &newStartBlock, &newBlockCount);
+ error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount,
+ HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS,
+ &newStartBlock, &newBlockCount);
if (error) {
printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error);
goto fail;
@@ -4618,7 +4651,7 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
goto free_fail;
}
- error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount);
+ error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount, HFS_ALLOC_SKIPFREEBLKS);
if (error) {
printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error);
goto free_fail;
@@ -4668,7 +4701,7 @@ hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
return error;
free_fail:
- (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount);
+ (void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount, HFS_ALLOC_SKIPFREEBLKS);
fail:
hfs_systemfile_unlock(hfsmp, lockflags);
(void) hfs_end_transaction(hfsmp);
@@ -4704,7 +4737,9 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
}
lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
- error = BlockAllocate(hfsmp, 1, 1, 1, true, true, &newBlock, &blockCount);
+ error = BlockAllocate(hfsmp, 1, 1, 1,
+ HFS_ALLOC_METAZONE | HFS_ALLOC_FORCECONTIG | HFS_ALLOC_SKIPFREEBLKS,
+ &newBlock, &blockCount);
if (error) {
printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error);
goto fail;
@@ -4713,7 +4748,7 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount);
goto free_fail;
}
- error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1);
+ error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1, HFS_ALLOC_SKIPFREEBLKS);
if (error) {
printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error);
goto free_fail;
@@ -4788,7 +4823,7 @@ hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
return error;
free_fail:
- (void) BlockDeallocate(hfsmp, newBlock, blockCount);
+ (void) BlockDeallocate(hfsmp, newBlock, blockCount, HFS_ALLOC_SKIPFREEBLKS);
fail:
hfs_systemfile_unlock(hfsmp, lockflags);
(void) hfs_end_transaction(hfsmp);
View
32 bsd/hfs/hfs_vfsutils.c
@@ -66,7 +66,6 @@
static void ReleaseMetaFileVNode(struct vnode *vp);
static int hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_args);
-static void hfs_metadatazone_init(struct hfsmount *);
static u_int32_t hfs_hotfile_freeblocks(struct hfsmount *);
@@ -733,7 +732,8 @@ OSErr hfs_MountHFSPlusVolume(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp,
* Allow hot file clustering if conditions allow.
*/
if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
- ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0)) {
+ ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
+ ((hfsmp->hfs_mp->mnt_kern_flag & MNTK_SSD) == 0)) {
(void) hfs_recording_init(hfsmp);
}
@@ -2401,7 +2401,7 @@ hfs_late_journal_init(struct hfsmount *hfsmp, HFSPlusVolumeHeader *vhp, void *_a
#define HOTBAND_MINIMUM_SIZE (10*1024*1024)
#define HOTBAND_MAXIMUM_SIZE (512*1024*1024)
-static void
+void
hfs_metadatazone_init(struct hfsmount *hfsmp)
{
ExtendedVCB *vcb;
@@ -2413,7 +2413,7 @@ hfs_metadatazone_init(struct hfsmount *hfsmp)
int items, really_do_it=1;
vcb = HFSTOVCB(hfsmp);
- fs_size = (u_int64_t)vcb->blockSize * (u_int64_t)vcb->totalBlocks;
+ fs_size = (u_int64_t)vcb->blockSize * (u_int64_t)vcb->allocLimit;
/*
* For volumes less than 10 GB, don't bother.
@@ -2535,16 +2535,34 @@ hfs_metadatazone_init(struct hfsmount *hfsmp)
hfsmp->hfs_min_alloc_start = zonesize / vcb->blockSize;
/*
* If doing the round up for hfs_min_alloc_start would push us past
- * totalBlocks, then just reset it back to 0. Though using a value
- * bigger than totalBlocks would not cause damage in the block allocator
+ * allocLimit, then just reset it back to 0. Though using a value
+ * bigger than allocLimit would not cause damage in the block allocator
* code, this value could get stored in the volume header and make it out
* to disk, making the volume header technically corrupt.
*/
- if (hfsmp->hfs_min_alloc_start >= hfsmp->totalBlocks) {
+ if (hfsmp->hfs_min_alloc_start >= hfsmp->allocLimit) {
hfsmp->hfs_min_alloc_start = 0;
}
if (really_do_it == 0) {
+ /* If metadata zone needs to be disabled because the
+ * volume was truncated, clear the bit and zero out
+ * the values that are no longer needed.
+ */
+ if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
+ /* Disable metadata zone */
+ hfsmp->hfs_flags &= ~HFS_METADATA_ZONE;
+
+ /* Zero out mount point values that are not required */
+ hfsmp->hfs_catalog_maxblks = 0;
+ hfsmp->hfs_hotfile_maxblks = 0;
+ hfsmp->hfs_hotfile_start = 0;
+ hfsmp->hfs_hotfile_end = 0;
+ hfsmp->hfs_hotfile_freeblks = 0;
+ hfsmp->hfs_metazone_start = 0;
+ hfsmp->hfs_metazone_end = 0;
+ }
+
return;
}
View
7 bsd/hfs/hfs_vnops.c
@@ -4119,6 +4119,13 @@ hfs_makenode(struct vnode *dvp, struct vnode **vpp, struct componentname *cnp,
/* set the cnode pointer only after successfully acquiring lock */
dcp = VTOC(dvp);
+
+ /* Don't allow creation of new entries in open-unlinked directories */
+ if ((error = hfs_checkdeleted (dcp))) {
+ hfs_unlock (dcp);
+ return error;
+ }
+
dcp->c_flag |= C_DIR_MODIFICATION;
hfsmp = VTOHFS(dvp);
View
6 bsd/hfs/hfs_xattr.c
@@ -2237,7 +2237,7 @@ alloc_attr_blks(struct hfsmount *hfsmp, size_t attrsize, size_t extentbufsize, H
lockflags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
for (i = 0; (blkcnt > 0) && (i < maxextents); i++) {
- result = BlockAllocate(hfsmp, startblk, blkcnt, blkcnt, 0, 0,
+ result = BlockAllocate(hfsmp, startblk, blkcnt, blkcnt, 0,
&extents[i].startBlock, &extents[i].blockCount);
#if HFS_XATTR_VERBOSE
printf("hfs: alloc_attr_blks: BA blkcnt %d [%d, %d] (%d)\n",
@@ -2262,7 +2262,7 @@ alloc_attr_blks(struct hfsmount *hfsmp, size_t attrsize, size_t extentbufsize, H
#endif
for (; i <= 0; i--) {
if ((blkcnt = extents[i].blockCount) != 0) {
- (void) BlockDeallocate(hfsmp, extents[i].startBlock, blkcnt);
+ (void) BlockDeallocate(hfsmp, extents[i].startBlock, blkcnt, 0);
extents[i].startBlock = 0;
extents[i].blockCount = 0;
}
@@ -2301,7 +2301,7 @@ free_attr_blks(struct hfsmount *hfsmp, int blkcnt, HFSPlusExtentDescriptor *exte
if (extents[i].startBlock == 0) {
break;
}
- (void)BlockDeallocate(hfsmp, extents[i].startBlock, extents[i].blockCount);
+ (void)BlockDeallocate(hfsmp, extents[i].startBlock, extents[i].blockCount, 0);
remblks -= extents[i].blockCount;
extents[i].startBlock = 0;
extents[i].blockCount = 0;
View
18 bsd/hfs/hfscommon/Misc/FileExtentMapping.c
@@ -576,7 +576,7 @@ static OSErr ReleaseExtents(
break;
}
- err = BlockDeallocate( vcb, extentRecord[extentIndex].startBlock, numAllocationBlocks );
+ err = BlockDeallocate( vcb, extentRecord[extentIndex].startBlock, numAllocationBlocks , 0);
if ( err != noErr )
break;
@@ -1128,8 +1128,8 @@ OSErr ExtendFileC (
startBlock,
howmany(MIN(bytesToAdd, availbytes), volumeBlockSize),
howmany(MIN(maximumBytes, availbytes), volumeBlockSize),
- wantContig,
- useMetaZone,
+ (wantContig ? HFS_ALLOC_FORCECONTIG : 0) |
+ (useMetaZone ? HFS_ALLOC_METAZONE : 0),
&actualStartBlock,
&actualNumBlocks);
}
@@ -1175,7 +1175,7 @@ OSErr ExtendFileC (
if (foundIndex == numExtentsPerRecord) {
// This record is full. Need to create a new one.
if (FTOC(fcb)->c_fileid == kHFSExtentsFileID) {
- (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks);
+ (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks, 0);
err = dskFulErr; // Oops. Can't extend extents file past first record.
break;
}
@@ -1206,7 +1206,7 @@ OSErr ExtendFileC (
// We couldn't create an extent record because extents B-tree
// couldn't grow. Dellocate the extent just allocated and
// return a disk full error.
- (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks);
+ (void) BlockDeallocate(vcb, actualStartBlock, actualNumBlocks, 0);
err = dskFulErr;
}
if (err != noErr) break;
@@ -1398,7 +1398,7 @@ OSErr TruncateFileC (
// Compute first volume allocation block to free
startBlock = extentRecord[extentIndex].startBlock + extentRecord[extentIndex].blockCount - numBlocks;
// Free the blocks in bitmap
- err = BlockDeallocate(vcb, startBlock, numBlocks);
+ err = BlockDeallocate(vcb, startBlock, numBlocks, 0);
if (err != noErr) goto ErrorExit;
// Adjust length of this extent
extentRecord[extentIndex].blockCount -= numBlocks;
@@ -1422,7 +1422,7 @@ OSErr TruncateFileC (
while (extentIndex < numExtentsPerRecord && extentRecord[extentIndex].blockCount != 0) {
numBlocks = extentRecord[extentIndex].blockCount;
// Deallocate this extent
- err = BlockDeallocate(vcb, extentRecord[extentIndex].startBlock, numBlocks);
+ err = BlockDeallocate(vcb, extentRecord[extentIndex].startBlock, numBlocks, 0);
if (err != noErr) goto ErrorExit;
// Update next file allocation block number
nextBlock += numBlocks;
@@ -1502,7 +1502,7 @@ OSErr HeadTruncateFile (
break; /* end of extents */
if (blksfreed < headblks) {
- error = BlockDeallocate(vcb, fcb->fcbExtents[i].startBlock, blkcnt);
+ error = BlockDeallocate(vcb, fcb->fcbExtents[i].startBlock, blkcnt, 0);
/*
* Any errors after the first BlockDeallocate
* must be ignored so we can put the file in
@@ -1560,7 +1560,7 @@ OSErr HeadTruncateFile (
break; /* end of extents */
if (blksfreed < headblks) {
- error = BlockDeallocate(vcb, extents[i].startBlock, blkcnt);
+ error = BlockDeallocate(vcb, extents[i].startBlock, blkcnt, 0);
if (error) {
printf("hfs: HeadTruncateFile: problems deallocating %s (%d)\n",
FTOC(fcb)->c_desc.cd_nameptr ? (const char *)FTOC(fcb)->c_desc.cd_nameptr : "", error);
View
96 bsd/hfs/hfscommon/Misc/VolumeAllocation.c
@@ -238,10 +238,7 @@ OSErr BlockAllocate (
u_int32_t startingBlock, /* preferred starting block, or 0 for no preference */
u_int32_t minBlocks, /* desired number of blocks to allocate */
u_int32_t maxBlocks, /* maximum number of blocks to allocate */
- Boolean forceContiguous, /* non-zero to force contiguous allocation and to force */
- /* minBlocks bytes to actually be allocated */
-
- Boolean useMetaZone,
+ u_int32_t flags, /* option flags */
u_int32_t *actualStartBlock, /* actual first block of allocation */
u_int32_t *actualNumBlocks) /* number of blocks actually allocated; if forceContiguous */
/* was zero, then this may represent fewer than minBlocks */
@@ -249,6 +246,20 @@ OSErr BlockAllocate (
u_int32_t freeBlocks;
OSErr err;
Boolean updateAllocPtr = false; // true if nextAllocation needs to be updated
+ Boolean useMetaZone;
+ Boolean forceContiguous;
+
+ if (flags & HFS_ALLOC_FORCECONTIG) {
+ forceContiguous = true;
+ } else {
+ forceContiguous = false;
+ }
+
+ if (flags & HFS_ALLOC_METAZONE) {
+ useMetaZone = true;
+ } else {
+ useMetaZone = false;
+ }
//
// Initialize outputs in case we get an error
@@ -257,25 +268,38 @@ OSErr BlockAllocate (
*actualNumBlocks = 0;
freeBlocks = hfs_freeblks(VCBTOHFS(vcb), 0);
- //
- // If the disk is already full, don't bother.
- //
- if (freeBlocks == 0) {
- err = dskFulErr;
- goto Exit;
- }
- if (forceContiguous && freeBlocks < minBlocks) {
- err = dskFulErr;
- goto Exit;
- }
- /*
- * Clip if necessary so we don't over-subscribe the free blocks.
+ /* Skip free block check if blocks are being allocated for relocating
+ * data during truncating a volume.
+ *
+ * During hfs_truncatefs(), the volume free block count is updated
+ * before relocating data to reflect the total number of free blocks
+ * that will exist on the volume after resize is successful. This
+ * means that we have reserved allocation blocks required for relocating
+ * the data and hence there is no need to check the free blocks.
+ * It will also prevent resize failure when the number of blocks in
+ * an extent being relocated is more than the free blocks that will
+ * exist after the volume is resized.
*/
- if (minBlocks > freeBlocks) {
- minBlocks = freeBlocks;
- }
- if (maxBlocks > freeBlocks) {
- maxBlocks = freeBlocks;
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ // If the disk is already full, don't bother.
+ if (freeBlocks == 0) {
+ err = dskFulErr;
+ goto Exit;
+ }
+ if (forceContiguous && freeBlocks < minBlocks) {
+ err = dskFulErr;
+ goto Exit;
+ }
+
+ /*
+ * Clip if necessary so we don't over-subscribe the free blocks.
+ */
+ if (minBlocks > freeBlocks) {
+ minBlocks = freeBlocks;
+ }
+ if (maxBlocks > freeBlocks) {
+ maxBlocks = freeBlocks;
+ }
}
//
@@ -387,11 +411,16 @@ OSErr BlockAllocate (
// than one entry in the array
}
}
-
- //
- // Update the number of free blocks on the volume
- //
- vcb->freeBlocks -= *actualNumBlocks;
+
+ /*
+ * Update the number of free blocks on the volume
+ *
+ * Skip updating the free blocks count if the block are
+ * being allocated to relocate data as part of hfs_truncatefs()
+ */
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ vcb->freeBlocks -= *actualNumBlocks;
+ }
MarkVCBDirty(vcb);
HFS_MOUNT_UNLOCK(vcb, TRUE);
@@ -428,7 +457,8 @@ __private_extern__
OSErr BlockDeallocate (
ExtendedVCB *vcb, // Which volume to deallocate space on
u_int32_t firstBlock, // First block in range to deallocate
- u_int32_t numBlocks) // Number of contiguous blocks to deallocate
+ u_int32_t numBlocks, // Number of contiguous blocks to deallocate
+ u_int32_t flags)
{
OSErr err;
u_int32_t tempWord;
@@ -452,7 +482,15 @@ OSErr BlockDeallocate (
// Update the volume's free block count, and mark the VCB as dirty.
//
HFS_MOUNT_LOCK(vcb, TRUE);
- vcb->freeBlocks += numBlocks;
+
+ /*
+ * Do not update the free block count. This flags is specified
+ * when a volume is being truncated.
+ */
+ if ((flags & HFS_ALLOC_SKIPFREEBLKS) == 0) {
+ vcb->freeBlocks += numBlocks;
+ }
+
vcb->hfs_freed_block_count += numBlocks;
if (firstBlock < vcb->sparseAllocation) {
vcb->sparseAllocation = firstBlock;
View
17 bsd/hfs/hfscommon/headers/FileMgrInternal.h
@@ -205,20 +205,31 @@ ReplaceBTreeRecord (FileReference refNum,
/* Prototypes for exported routines in VolumeAllocation.c*/
+
+/*
+ * Flags for BlockAllocate() and BlockDeallocate()
+ */
+/* Force contiguous block allocation and to force minBlocks to actually be allocated */
+#define HFS_ALLOC_FORCECONTIG 0x1
+/* Can use metadata zone blocks */
+#define HFS_ALLOC_METAZONE 0x2
+/* Skip checking and updating of free blocks during allocation and deallocation */
+#define HFS_ALLOC_SKIPFREEBLKS 0x4
+
EXTERN_API_C( OSErr )
BlockAllocate (ExtendedVCB * vcb,
u_int32_t startingBlock,
u_int32_t minBlocks,
u_int32_t maxBlocks,
- Boolean forceContiguous,
- Boolean useMetaZone,
+ u_int32_t flags,
u_int32_t * startBlock,
u_int32_t * actualBlocks);
EXTERN_API_C( OSErr )
BlockDeallocate (ExtendedVCB * vcb,
u_int32_t firstBlock,
- u_int32_t numBlocks);
+ u_int32_t numBlocks,
+ u_int32_t flags);
EXTERN_API_C ( void )
invalidate_free_extent_cache (ExtendedVCB * vcb);
View
87 bsd/kern/kern_credential.c
@@ -145,6 +145,7 @@ static lck_mtx_t *kauth_resolver_mtx;
static volatile pid_t kauth_resolver_identity;
static int kauth_resolver_registered;
static uint32_t kauth_resolver_sequence;
+static int kauth_resolver_timeout = 30; /* default: 30 seconds */
struct kauth_resolver_work {
TAILQ_ENTRY(kauth_resolver_work) kr_link;
@@ -251,8 +252,8 @@ kauth_resolver_init(void)
* EINTR Operation interrupted (e.g. by
* a signal)
* ENOMEM Could not allocate work item
- * ??? An error from the user space
- * daemon
+ * workp->kr_result:??? An error from the user space
+ * daemon (includes ENOENT!)
*
* Notes: Allocate a work queue entry, submit the work and wait for
* the operation to either complete or time out. Outstanding
@@ -269,7 +270,9 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp)
/* no point actually blocking if the resolver isn't up yet */
if (kauth_resolver_identity == 0) {
/*
- * We've already waited an initial 30 seconds with no result.
+ * We've already waited an initial <kauth_resolver_timeout>
+ * seconds with no result.
+ *
* Sleep on a stack address so no one wakes us before timeout;
* we sleep a half a second in case we are a high priority
* process, so that memberd doesn't starve while we are in a
@@ -312,7 +315,7 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp)
wakeup_one((caddr_t)&kauth_resolver_unsubmitted);
for (;;) {
/* we could compute a better timeout here */
- ts.tv_sec = 30;
+ ts.tv_sec = kauth_resolver_timeout;
ts.tv_nsec = 0;
error = msleep(workp, kauth_resolver_mtx, PCATCH, "kr_submit", &ts);
/* request has been completed? */
@@ -359,12 +362,23 @@ kauth_resolver_submit(struct kauth_identity_extlookup *lkp)
*/
if ((error == EWOULDBLOCK) && (workp->kr_flags & KAUTH_REQUEST_UNSUBMITTED)) {
KAUTH_DEBUG("RESOLVER - request timed out without being collected for processing, resolver dead");
+
+ /*
+ * Make the current resolver non-authoritative, and mark it as
+ * no longer registered to prevent kauth_cred_ismember_gid()
+ * enqueueing more work until a new one is registered. This
+ * mitigates the damage a crashing resolver may inflict.
+ */
kauth_resolver_identity = 0;
+ kauth_resolver_registered = 0;
+
+ /* kill all the other requests that are waiting as well */
TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link)
wakeup(killp);
TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link)
wakeup(killp);
+ /* Cause all waiting-for-work threads to return EIO */
+ wakeup((caddr_t)&kauth_resolver_unsubmitted);
}
/*
@@ -455,6 +469,14 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3
workp->kr_flags |= KAUTH_REQUEST_UNSUBMITTED;
TAILQ_INSERT_HEAD(&kauth_resolver_unsubmitted, workp, kr_link);
}
+ /*
+ * Allow user space resolver to override the
+ * external resolution timeout
+ */
+ if (message >= 30 && message <= 10000) {
+ kauth_resolver_timeout = message;
+ KAUTH_DEBUG("RESOLVER - new resolver changes timeout to %d seconds\n", (int)message);
+ }
kauth_resolver_identity = new_id;
kauth_resolver_registered = 1;
wakeup(&kauth_resolver_unsubmitted);
@@ -479,7 +501,15 @@ identitysvc(__unused struct proc *p, struct identitysvc_args *uap, __unused int3
struct kauth_resolver_work *killp;
KAUTH_RESOLVER_LOCK();
+
+ /*
+ * Clear the identity, but also mark it as unregistered so
+ * there is no explicit future expectation of us getting a
+ * new resolver any time soon.
+ */
kauth_resolver_identity = 0;
+ kauth_resolver_registered = 0;
+
TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link)
wakeup(killp);
TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link)
@@ -706,9 +736,14 @@ kauth_resolver_complete(user_addr_t message)
KAUTH_DEBUG("RESOLVER - resolver %d died, waiting for a new one", kauth_resolver_identity);
/*
* Terminate outstanding requests; without an authoritative
- * resolver, we are now back on our own authority.
+ * resolver, we are now back on our own authority. Tag the
+ * resolver unregistered to prevent kauth_cred_ismember_gid()
+ * enqueueing more work until a new one is registered. This
+ * mitigates the damage a crashing resolver may inflict.
*/
kauth_resolver_identity = 0;
+ kauth_resolver_registered = 0;
+
TAILQ_FOREACH(killp, &kauth_resolver_submitted, kr_link)
wakeup(killp);
TAILQ_FOREACH(killp, &kauth_resolver_unsubmitted, kr_link)
@@ -2138,6 +2173,8 @@ kauth_groups_updatecache(struct kauth_identity_extlookup *el)
* kauth_resolver_submit:EWOULDBLOCK
* kauth_resolver_submit:EINTR
* kauth_resolver_submit:ENOMEM
+ * kauth_resolver_submit:ENOENT User space daemon did not vend
+ * this credential.
* kauth_resolver_submit:??? Unlikely error from user space
*
* Implicit returns:
@@ -2252,6 +2289,8 @@ kauth_cred_ismember_gid(kauth_cred_t cred, gid_t gid, int *resultp)
* Returns: 0 Success
* kauth_cred_guid2gid:EINVAL
* kauth_cred_ismember_gid:ENOENT
+ * kauth_resolver_submit:ENOENT User space daemon did not vend
+ * this credential.
* kauth_cred_ismember_gid:EWOULDBLOCK
* kauth_cred_ismember_gid:EINTR
* kauth_cred_ismember_gid:ENOMEM
@@ -2839,13 +2878,45 @@ kauth_cred_t
kauth_cred_create(kauth_cred_t cred)
{
kauth_cred_t found_cred, new_cred = NULL;
+ int is_member = 0;
KAUTH_CRED_HASH_LOCK_ASSERT();
- if (cred->cr_flags & CRF_NOMEMBERD)
+ if (cred->cr_flags & CRF_NOMEMBERD) {
cred->cr_gmuid = KAUTH_UID_NONE;
- else
- cred->cr_gmuid = cred->cr_uid;
+ } else {
+ /*
+ * If the template credential is not opting out of external
+ * group membership resolution, then we need to check that
+ * the UID we will be using is resolvable by the external
+ * resolver. If it's not, then we opt it out anyway, since
+ * all future external resolution requests will be failing
+ * anyway, and potentially taking a long time to do it. We
+ * use gid 0 because we always know it will exist and not
+ * trigger additional lookups. This is OK, because we end up
+ * pre-caching the information here as a result.
+ */
+ if (!kauth_cred_ismember_gid(cred, 0, &is_member)) {
+ /*
+ * It's a recognized value; we don't really care about
+ * the answer, so long as it's something the external
+ * resolver could have vended.
+ */
+ cred->cr_gmuid = cred->cr_uid;
+ } else {
+ /*
+ * It's not something the external resolver could
+ * have vended, so we don't want to ask it more
+ * questions about the credential in the future. This
+ * speeds up future lookups, as long as the caller
+ * caches results; otherwise, it's the same recurring
+ * cost. Since most credentials are used multiple
+ * times, we still get some performance win from this.
+ */
+ cred->cr_gmuid = KAUTH_UID_NONE;
+ cred->cr_flags |= CRF_NOMEMBERD;
+ }
+ }
/* Caller *must* specify at least the egid in cr_groups[0] */
if (cred->cr_ngroups < 1)
View
2  bsd/kern/kern_descrip.c
@@ -789,7 +789,7 @@ fcntl_nocancel(proc_t p, struct fcntl_nocancel_args *uap, int32_t *retval)
goto out;
}
if (fp->f_type == DTYPE_PIPE) {
- error = fo_ioctl(fp, (int)TIOCSPGRP, (caddr_t)&tmp, &context);
+ error = fo_ioctl(fp, TIOCSPGRP, (caddr_t)&tmp, &context);
goto out;
}
View
2  bsd/kern/kern_fork.c
@@ -1375,6 +1375,8 @@ uthread_zone_init(void)
THREAD_CHUNK * sizeof(struct uthread),
"uthreads");
uthread_zone_inited = 1;
+
+ zone_change(uthread_zone, Z_NOENCRYPT, TRUE);
}
}
View
259 bsd/kern/kern_malloc.c
@@ -304,169 +304,170 @@ const char *memname[] = {
struct kmzones {
size_t kz_elemsize;
void *kz_zalloczone;
+ boolean_t kz_noencrypt;
} kmzones[M_LAST] = {
#define SOS(sname) sizeof (struct sname)
#define SOX(sname) -1
- { -1, 0 }, /* 0 M_FREE */
- { MSIZE, KMZ_CREATEZONE }, /* 1 M_MBUF */
- { 0, KMZ_MALLOC }, /* 2 M_DEVBUF */
- { SOS(socket), KMZ_CREATEZONE }, /* 3 M_SOCKET */
- { SOS(inpcb), KMZ_LOOKUPZONE }, /* 4 M_PCB */
- { M_MBUF, KMZ_SHAREZONE }, /* 5 M_RTABLE */
- { M_MBUF, KMZ_SHAREZONE }, /* 6 M_HTABLE */
- { M_MBUF, KMZ_SHAREZONE }, /* 7 M_FTABLE */
- { SOS(rusage), KMZ_CREATEZONE }, /* 8 M_ZOMBIE */
- { 0, KMZ_MALLOC }, /* 9 M_IFADDR */
- { M_MBUF, KMZ_SHAREZONE }, /* 10 M_SOOPTS */
- { 0, KMZ_MALLOC }, /* 11 M_SONAME */
- { MAXPATHLEN, KMZ_CREATEZONE }, /* 12 M_NAMEI */
- { 0, KMZ_MALLOC }, /* 13 M_GPROF */
- { 0, KMZ_MALLOC }, /* 14 M_IOCTLOPS */
- { 0, KMZ_MALLOC }, /* 15 M_MAPMEM */
- { SOS(ucred), KMZ_CREATEZONE }, /* 16 M_CRED */
- { SOS(pgrp), KMZ_CREATEZONE }, /* 17 M_PGRP */
- { SOS(session), KMZ_CREATEZONE }, /* 18 M_SESSION */
- { SOS(user32_iovec), KMZ_LOOKUPZONE }, /* 19 M_IOV32 */
- { SOS(mount), KMZ_CREATEZONE }, /* 20 M_MOUNT */
- { 0, KMZ_MALLOC }, /* 21 M_FHANDLE */
+ { -1, 0, FALSE }, /* 0 M_FREE */
+ { MSIZE, KMZ_CREATEZONE, FALSE }, /* 1 M_MBUF */
+ { 0, KMZ_MALLOC, FALSE }, /* 2 M_DEVBUF */
+ { SOS(socket), KMZ_CREATEZONE, TRUE }, /* 3 M_SOCKET */
+ { SOS(inpcb), KMZ_LOOKUPZONE, TRUE }, /* 4 M_PCB */
+ { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 5 M_RTABLE */
+ { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 6 M_HTABLE */
+ { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 7 M_FTABLE */
+ { SOS(rusage), KMZ_CREATEZONE, TRUE }, /* 8 M_ZOMBIE */
+ { 0, KMZ_MALLOC, FALSE }, /* 9 M_IFADDR */
+ { M_MBUF, KMZ_SHAREZONE, FALSE }, /* 10 M_SOOPTS */
+ { 0, KMZ_MALLOC, FALSE }, /* 11 M_SONAME */
+ { MAXPATHLEN, KMZ_CREATEZONE, FALSE }, /* 12 M_NAMEI */
+ { 0, KMZ_MALLOC, FALSE }, /* 13 M_GPROF */
+ { 0, KMZ_MALLOC, FALSE }, /* 14 M_IOCTLOPS */
+ { 0, KMZ_MALLOC, FALSE }, /* 15 M_MAPMEM */
+ { SOS(ucred), KMZ_CREATEZONE, FALSE }, /* 16 M_CRED */
+ { SOS(pgrp), KMZ_CREATEZONE, FALSE }, /* 17 M_PGRP */
+ { SOS(session), KMZ_CREATEZONE, FALSE }, /* 18 M_SESSION */
+ { SOS(user32_iovec), KMZ_LOOKUPZONE, FALSE },/* 19 M_IOV32 */
+ { SOS(mount), KMZ_CREATEZONE, FALSE }, /* 20 M_MOUNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 21 M_FHANDLE */
#if (NFSCLIENT || NFSSERVER)
- { SOS(nfsreq), KMZ_CREATEZONE }, /* 22 M_NFSREQ */
- { SOS(nfsmount), KMZ_CREATEZONE }, /* 23 M_NFSMNT */
- { SOS(nfsnode), KMZ_CREATEZONE }, /* 24 M_NFSNODE */
+ { SOS(nfsreq), KMZ_CREATEZONE, FALSE }, /* 22 M_NFSREQ */
+ { SOS(nfsmount), KMZ_CREATEZONE, FALSE },/* 23 M_NFSMNT */
+ { SOS(nfsnode), KMZ_CREATEZONE, FALSE }, /* 24 M_NFSNODE */
#else
- { 0, KMZ_MALLOC }, /* 22 M_NFSREQ */
- { 0, KMZ_MALLOC }, /* 23 M_NFSMNT */
- { 0, KMZ_MALLOC }, /* 24 M_NFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 22 M_NFSREQ */
+ { 0, KMZ_MALLOC, FALSE }, /* 23 M_NFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 24 M_NFSNODE */
#endif
- { SOS(vnode), KMZ_CREATEZONE }, /* 25 M_VNODE */
- { SOS(namecache), KMZ_CREATEZONE }, /* 26 M_CACHE */
+ { SOS(vnode), KMZ_CREATEZONE, TRUE }, /* 25 M_VNODE */
+ { SOS(namecache),KMZ_CREATEZONE, FALSE }, /* 26 M_CACHE */
#if QUOTA
- { SOX(dquot), KMZ_LOOKUPZONE }, /* 27 M_DQUOT */
+ { SOX(dquot), KMZ_LOOKUPZONE, FALSE }, /* 27 M_DQUOT */
#else
- { 0, KMZ_MALLOC }, /* 27 M_DQUOT */
+ { 0, KMZ_MALLOC, FALSE }, /* 27 M_DQUOT */
#endif
- { 0, KMZ_MALLOC }, /* 28 M_UFSMNT */
- { 0, KMZ_MALLOC }, /* 29 M_CGSUM */
- { SOS(plimit), KMZ_CREATEZONE }, /* 30 M_PLIMIT */
- { SOS(sigacts), KMZ_CREATEZONE }, /* 31 M_SIGACTS */
- { 0, KMZ_MALLOC }, /* 32 M_VMOBJ */
- { 0, KMZ_MALLOC }, /* 33 M_VMOBJHASH */
- { 0, KMZ_MALLOC }, /* 34 M_VMPMAP */
- { 0, KMZ_MALLOC }, /* 35 M_VMPVENT */
- { 0, KMZ_MALLOC }, /* 36 M_VMPAGER */
- { 0, KMZ_MALLOC }, /* 37 M_VMPGDATA */
- { SOS(fileproc), KMZ_CREATEZONE }, /* 38 M_FILEPROC */
- { SOS(filedesc), KMZ_CREATEZONE }, /* 39 M_FILEDESC */
- { SOX(lockf), KMZ_CREATEZONE }, /* 40 M_LOCKF */
- { SOS(proc), KMZ_CREATEZONE }, /* 41 M_PROC */
- { SOS(pstats), KMZ_CREATEZONE }, /* 42 M_PSTATS */
- { 0, KMZ_MALLOC }, /* 43 M_SEGMENT */
- { M_FFSNODE, KMZ_SHAREZONE }, /* 44 M_LFSNODE */
- { 0, KMZ_MALLOC }, /* 45 M_FFSNODE */
- { M_FFSNODE, KMZ_SHAREZONE }, /* 46 M_MFSNODE */
- { 0, KMZ_MALLOC }, /* 47 M_NQLEASE */
- { 0, KMZ_MALLOC }, /* 48 M_NQMHOST */
- { 0, KMZ_MALLOC }, /* 49 M_NETADDR */
+ { 0, KMZ_MALLOC, FALSE }, /* 28 M_UFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 29 M_CGSUM */
+ { SOS(plimit), KMZ_CREATEZONE, TRUE }, /* 30 M_PLIMIT */
+ { SOS(sigacts), KMZ_CREATEZONE, TRUE }, /* 31 M_SIGACTS */
+ { 0, KMZ_MALLOC, FALSE }, /* 32 M_VMOBJ */
+ { 0, KMZ_MALLOC, FALSE }, /* 33 M_VMOBJHASH */
+ { 0, KMZ_MALLOC, FALSE }, /* 34 M_VMPMAP */
+ { 0, KMZ_MALLOC, FALSE }, /* 35 M_VMPVENT */
+ { 0, KMZ_MALLOC, FALSE }, /* 36 M_VMPAGER */
+ { 0, KMZ_MALLOC, FALSE }, /* 37 M_VMPGDATA */
+ { SOS(fileproc),KMZ_CREATEZONE, TRUE }, /* 38 M_FILEPROC */
+ { SOS(filedesc),KMZ_CREATEZONE, TRUE }, /* 39 M_FILEDESC */
+ { SOX(lockf), KMZ_CREATEZONE, TRUE }, /* 40 M_LOCKF */
+ { SOS(proc), KMZ_CREATEZONE, FALSE }, /* 41 M_PROC */
+ { SOS(pstats), KMZ_CREATEZONE, TRUE }, /* 42 M_PSTATS */
+ { 0, KMZ_MALLOC, FALSE }, /* 43 M_SEGMENT */
+ { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 44 M_LFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 45 M_FFSNODE */
+ { M_FFSNODE, KMZ_SHAREZONE, FALSE }, /* 46 M_MFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 47 M_NQLEASE */
+ { 0, KMZ_MALLOC, FALSE }, /* 48 M_NQMHOST */
+ { 0, KMZ_MALLOC, FALSE }, /* 49 M_NETADDR */
#if (NFSCLIENT || NFSSERVER)
{ SOX(nfsrv_sock),
- KMZ_CREATEZONE }, /* 50 M_NFSSVC */
- { 0, KMZ_MALLOC }, /* 51 M_NFSUID */
+ KMZ_CREATEZONE, FALSE }, /* 50 M_NFSSVC */
+ { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */
{ SOX(nfsrvcache),
- KMZ_CREATEZONE }, /* 52 M_NFSD */
+ KMZ_CREATEZONE, FALSE }, /* 52 M_NFSD */
#else
- { 0, KMZ_MALLOC }, /* 50 M_NFSSVC */
- { 0, KMZ_MALLOC }, /* 51 M_NFSUID */
- { 0, KMZ_MALLOC }, /* 52 M_NFSD */
+ { 0, KMZ_MALLOC, FALSE }, /* 50 M_NFSSVC */
+ { 0, KMZ_MALLOC, FALSE }, /* 51 M_NFSUID */
+ { 0, KMZ_MALLOC, FALSE }, /* 52 M_NFSD */
#endif
{ SOX(ip_moptions),
- KMZ_LOOKUPZONE }, /* 53 M_IPMOPTS */
- { SOX(in_multi), KMZ_LOOKUPZONE }, /* 54 M_IPMADDR */
+ KMZ_LOOKUPZONE, FALSE }, /* 53 M_IPMOPTS */
+ { SOX(in_multi),KMZ_LOOKUPZONE, FALSE }, /* 54 M_IPMADDR */
{ SOX(ether_multi),
- KMZ_LOOKUPZONE }, /* 55 M_IFMADDR */
- { SOX(mrt), KMZ_CREATEZONE }, /* 56 M_MRTABLE */
- { 0, KMZ_MALLOC }, /* 57 unused entry */
- { 0, KMZ_MALLOC }, /* 58 unused entry */
+ KMZ_LOOKUPZONE, FALSE }, /* 55 M_IFMADDR */
+ { SOX(mrt), KMZ_CREATEZONE, TRUE }, /* 56 M_MRTABLE */
+ { 0, KMZ_MALLOC, FALSE }, /* 57 unused entry */
+ { 0, KMZ_MALLOC, FALSE }, /* 58 unused entry */
#if (NFSCLIENT || NFSSERVER)
{ SOS(nfsrv_descript),
- KMZ_CREATEZONE }, /* 59 M_NFSRVDESC */
- { SOS(nfsdmap), KMZ_CREATEZONE }, /* 60 M_NFSDIROFF */
- { SOS(fhandle), KMZ_LOOKUPZONE }, /* 61 M_NFSBIGFH */
+ KMZ_CREATEZONE, FALSE }, /* 59 M_NFSRVDESC */
+ { SOS(nfsdmap), KMZ_CREATEZONE, FALSE }, /* 60 M_NFSDIROFF */
+ { SOS(fhandle), KMZ_LOOKUPZONE, FALSE }, /* 61 M_NFSBIGFH */
#else
- { 0, KMZ_MALLOC }, /* 59 M_NFSRVDESC */
- { 0, KMZ_MALLOC }, /* 60 M_NFSDIROFF */
- { 0, KMZ_MALLOC }, /* 61 M_NFSBIGFH */
+ { 0, KMZ_MALLOC, FALSE }, /* 59 M_NFSRVDESC */
+ { 0, KMZ_MALLOC, FALSE }, /* 60 M_NFSDIROFF */
+ { 0, KMZ_MALLOC, FALSE }, /* 61 M_NFSBIGFH */
#endif
- { 0, KMZ_MALLOC }, /* 62 M_MSDOSFSMNT */
- { 0, KMZ_MALLOC }, /* 63 M_MSDOSFSFAT */
- { 0, KMZ_MALLOC }, /* 64 M_MSDOSFSNODE */
- { SOS(tty), KMZ_CREATEZONE }, /* 65 M_TTYS */
- { 0, KMZ_MALLOC }, /* 66 M_EXEC */
- { 0, KMZ_MALLOC }, /* 67 M_MISCFSMNT */
- { 0, KMZ_MALLOC }, /* 68 M_MISCFSNODE */
- { 0, KMZ_MALLOC }, /* 69 M_ADOSFSMNT */
- { 0, KMZ_MALLOC }, /* 70 M_ADOSFSNODE */
- { 0, KMZ_MALLOC }, /* 71 M_ANODE */
- { SOX(buf), KMZ_CREATEZONE }, /* 72 M_BUFHDR */
+ { 0, KMZ_MALLOC, FALSE }, /* 62 M_MSDOSFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 63 M_MSDOSFSFAT */
+ { 0, KMZ_MALLOC, FALSE }, /* 64 M_MSDOSFSNODE */
+ { SOS(tty), KMZ_CREATEZONE, FALSE }, /* 65 M_TTYS */
+ { 0, KMZ_MALLOC, FALSE }, /* 66 M_EXEC */
+ { 0, KMZ_MALLOC, FALSE }, /* 67 M_MISCFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 68 M_MISCFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 69 M_ADOSFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 70 M_ADOSFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 71 M_ANODE */
+ { SOX(buf), KMZ_CREATEZONE, TRUE }, /* 72 M_BUFHDR */
{ (NDFILE * OFILESIZE),
- KMZ_CREATEZONE }, /* 73 M_OFILETABL */
- { MCLBYTES, KMZ_CREATEZONE }, /* 74 M_MCLUST */
+ KMZ_CREATEZONE, FALSE }, /* 73 M_OFILETABL */
+ { MCLBYTES, KMZ_CREATEZONE, FALSE }, /* 74 M_MCLUST */
#if HFS
- { SOX(hfsmount), KMZ_LOOKUPZONE }, /* 75 M_HFSMNT */
- { SOS(cnode), KMZ_CREATEZONE }, /* 76 M_HFSNODE */
- { SOS(filefork), KMZ_CREATEZONE }, /* 77 M_HFSFORK */
+ { SOX(hfsmount),KMZ_LOOKUPZONE, FALSE }, /* 75 M_HFSMNT */
+ { SOS(cnode), KMZ_CREATEZONE, TRUE }, /* 76 M_HFSNODE */
+ { SOS(filefork),KMZ_CREATEZONE, TRUE }, /* 77 M_HFSFORK */
#else
- { 0, KMZ_MALLOC }, /* 75 M_HFSMNT */
- { 0, KMZ_MALLOC }, /* 76 M_HFSNODE */
- { 0, KMZ_MALLOC }, /* 77 M_HFSFORK */
+ { 0, KMZ_MALLOC, FALSE }, /* 75 M_HFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 76 M_HFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 77 M_HFSFORK */
#endif
- { 0, KMZ_MALLOC }, /* 78 M_ZFSMNT */
- { 0, KMZ_MALLOC }, /* 79 M_ZFSNODE */
- { 0, KMZ_MALLOC }, /* 80 M_TEMP */
- { 0, KMZ_MALLOC }, /* 81 M_SECA */
- { 0, KMZ_MALLOC }, /* 82 M_DEVFS */
- { 0, KMZ_MALLOC }, /* 83 M_IPFW */
- { 0, KMZ_MALLOC }, /* 84 M_UDFNODE */
- { 0, KMZ_MALLOC }, /* 85 M_UDFMOUNT */
- { 0, KMZ_MALLOC }, /* 86 M_IP6NDP */
- { 0, KMZ_MALLOC }, /* 87 M_IP6OPT */
- { 0, KMZ_MALLOC }, /* 88 M_IP6MISC */
- { 0, KMZ_MALLOC }, /* 89 M_TSEGQ */
- { 0, KMZ_MALLOC }, /* 90 M_IGMP */
+ { 0, KMZ_MALLOC, FALSE }, /* 78 M_ZFSMNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 79 M_ZFSNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 80 M_TEMP */
+ { 0, KMZ_MALLOC, FALSE }, /* 81 M_SECA */
+ { 0, KMZ_MALLOC, FALSE }, /* 82 M_DEVFS */
+ { 0, KMZ_MALLOC, FALSE }, /* 83 M_IPFW */
+ { 0, KMZ_MALLOC, FALSE }, /* 84 M_UDFNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 85 M_UDFMOUNT */
+ { 0, KMZ_MALLOC, FALSE }, /* 86 M_IP6NDP */
+ { 0, KMZ_MALLOC, FALSE }, /* 87 M_IP6OPT */
+ { 0, KMZ_MALLOC, FALSE }, /* 88 M_IP6MISC */
+ { 0, KMZ_MALLOC, FALSE }, /* 89 M_TSEGQ */
+ { 0, KMZ_MALLOC, FALSE }, /* 90 M_IGMP */
#if JOURNALING
- { SOS(journal), KMZ_CREATEZONE }, /* 91 M_JNL_JNL */
- { SOS(transaction), KMZ_CREATEZONE }, /* 92 M_JNL_TR */
+ { SOS(journal), KMZ_CREATEZONE, FALSE }, /* 91 M_JNL_JNL */
+ { SOS(transaction), KMZ_CREATEZONE, FALSE }, /* 92 M_JNL_TR */
#else
- { 0, KMZ_MALLOC }, /* 91 M_JNL_JNL */
- { 0, KMZ_MALLOC }, /* 92 M_JNL_TR */
+ { 0, KMZ_MALLOC, FALSE }, /* 91 M_JNL_JNL */
+ { 0, KMZ_MALLOC, FALSE }, /* 92 M_JNL_TR */
#endif
- { SOS(specinfo), KMZ_CREATEZONE }, /* 93 M_SPECINFO */
- { SOS(kqueue), KMZ_CREATEZONE }, /* 94 M_KQUEUE */
+ { SOS(specinfo), KMZ_CREATEZONE, TRUE }, /* 93 M_SPECINFO */
+ { SOS(kqueue), KMZ_CREATEZONE, FALSE }, /* 94 M_KQUEUE */
#if HFS
- { SOS(directoryhint), KMZ_CREATEZONE }, /* 95 M_HFSDIRHINT */
+ { SOS(directoryhint), KMZ_CREATEZONE, FALSE }, /* 95 M_HFSDIRHINT */
#else
- { 0, KMZ_MALLOC }, /* 95 M_HFSDIRHINT */
+ { 0, KMZ_MALLOC, FALSE }, /* 95 M_HFSDIRHINT */
#endif
- { SOS(cl_readahead), KMZ_CREATEZONE }, /* 96 M_CLRDAHEAD */
- { SOS(cl_writebehind),KMZ_CREATEZONE }, /* 97 M_CLWRBEHIND */
- { SOS(user64_iovec), KMZ_LOOKUPZONE }, /* 98 M_IOV64 */
- { SOS(fileglob), KMZ_CREATEZONE }, /* 99 M_FILEGLOB */
- { 0, KMZ_MALLOC }, /* 100 M_KAUTH */
- { 0, KMZ_MALLOC }, /* 101 M_DUMMYNET */
+ { SOS(cl_readahead), KMZ_CREATEZONE, TRUE }, /* 96 M_CLRDAHEAD */
+ { SOS(cl_writebehind),KMZ_CREATEZONE, TRUE }, /* 97 M_CLWRBEHIND */
+ { SOS(user64_iovec), KMZ_LOOKUPZONE, FALSE },/* 98 M_IOV64 */
+ { SOS(fileglob), KMZ_CREATEZONE, TRUE }, /* 99 M_FILEGLOB */
+ { 0, KMZ_MALLOC, FALSE }, /* 100 M_KAUTH */
+ { 0, KMZ_MALLOC, FALSE }, /* 101 M_DUMMYNET */
#ifndef __LP64__
- { SOS(unsafe_fsnode),KMZ_CREATEZONE }, /* 102 M_UNSAFEFS */
+ { SOS(unsafe_fsnode),KMZ_CREATEZONE, FALSE }, /* 102 M_UNSAFEFS */
#else
- { 0, KMZ_MALLOC }, /* 102 M_UNSAFEFS */
+ { 0, KMZ_MALLOC, FALSE }, /* 102 M_UNSAFEFS */
#endif /* __LP64__ */
- { 0, KMZ_MALLOC }, /* 103 M_MACPIPELABEL */
- { 0, KMZ_MALLOC }, /* 104 M_MACTEMP */
- { 0, KMZ_MALLOC }, /* 105 M_SBUF */
- { 0, KMZ_MALLOC }, /* 106 M_HFS_EXTATTR */
- { 0, KMZ_MALLOC }, /* 107 M_LCTX */
- { 0, KMZ_MALLOC }, /* 108 M_TRAFFIC_MGT */
+ { 0, KMZ_MALLOC, FALSE }, /* 103 M_MACPIPELABEL */
+ { 0, KMZ_MALLOC, FALSE }, /* 104 M_MACTEMP */
+ { 0, KMZ_MALLOC, FALSE }, /* 105 M_SBUF */
+ { 0, KMZ_MALLOC, FALSE }, /* 106 M_HFS_EXTATTR */
+ { 0, KMZ_MALLOC, FALSE }, /* 107 M_LCTX */
+ { 0, KMZ_MALLOC, FALSE }, /* 108 M_TRAFFIC_MGT */
#if HFS_COMPRESSION
- { SOS(decmpfs_cnode),KMZ_CREATEZONE }, /* 109 M_DECMPFS_CNODE */
+ { SOS(decmpfs_cnode),KMZ_CREATEZONE, FALSE }, /* 109 M_DECMPFS_CNODE */
#else
- { 0, KMZ_MALLOC }, /* 109 M_DECMPFS_CNODE */
+ { 0, KMZ_MALLOC, FALSE }, /* 109 M_DECMPFS_CNODE */
#endif /* HFS_COMPRESSION */
#undef SOS
#undef SOX
@@ -498,6 +499,8 @@ kmeminit(void)
kmz->kz_zalloczone = zinit(kmz->kz_elemsize,
1024 * 1024, PAGE_SIZE,
memname[kmz - kmzones]);
+ if (kmz->kz_noencrypt == TRUE)
+ zone_change(kmz->kz_zalloczone, Z_NOENCRYPT, TRUE);
}
else if (kmz->kz_zalloczone == KMZ_LOOKUPZONE)
kmz->kz_zalloczone = kalloc_zone(kmz->kz_elemsize);
View
13 bsd/kern/kern_symfile.c
@@ -96,7 +96,8 @@ kern_open_file_for_direct_io(const char * name,
void * callback_ref,
dev_t * device_result,
uint64_t * partitionbase_result,
- uint64_t * maxiocount_result)
+ uint64_t * maxiocount_result,
+ boolean_t * solid_state)
{
struct kern_direct_file_io_ref_t * ref;
@@ -225,6 +226,16 @@ kern_open_file_for_direct_io(const char * name,
if (maxiocount_result)
*maxiocount_result = maxiocount;
+ if (solid_state)
+ {
+ int isssd = 0;
+ error = do_ioctl(p1, p2, DKIOCISSOLIDSTATE, (caddr_t)&isssd);
+ if (error)
+ *solid_state = FALSE;
+ else
+ *solid_state = isssd;
+ }
+
// generate the block list
error = 0;
View
2  bsd/kern/kpi_mbuf.c
@@ -83,7 +83,7 @@ errno_t mbuf_align_32(mbuf_t mbuf, size_t len)
addr64_t mbuf_data_to_physical(void* ptr)
{
- return (addr64_t)(intptr_t)mcl_to_paddr(ptr);
+ return (addr64_t)(uintptr_t)mcl_to_paddr(ptr);
}
errno_t mbuf_get(mbuf_how_t how, mbuf_type_t type, mbuf_t *mbuf)
View
2  bsd/kern/tty.c
@@ -3022,6 +3022,8 @@ ttymalloc(void)
/* output queue doesn't need quoting */
clalloc(&tp->t_outq, TTYCLSIZE, 0);
lck_mtx_init(&tp->t_lock, tty_lck_grp, tty_lck_attr);
+ klist_init(&tp->t_rsel.si_note);
+ klist_init(&tp->t_wsel.si_note);
}
return(tp);
}
View
2  bsd/kern/tty_ptmx.c
@@ -1585,12 +1585,10 @@ ptsd_kqfilter(dev_t dev, struct knote *kn)
switch (kn->kn_filter) {
case EVFILT_READ:
kn->kn_fop = &ptsd_kqops_read;
- SLIST_INIT(&tp->t_rsel.si_note);
KNOTE_ATTACH(&tp->t_rsel.si_note, kn);
break;
case EVFILT_WRITE:
kn->kn_fop = &ptsd_kqops_write;
- SLIST_INIT(&tp->t_wsel.si_note);
KNOTE_ATTACH(&tp->t_wsel.si_note, kn);
break;
default:
View
2  bsd/kern/ubc_subr.c
@@ -374,6 +374,8 @@ ubc_init(void)
i = (vm_size_t) sizeof (struct ubc_info);
ubc_info_zone = zinit (i, 10000*i, 8192, "ubc_info zone");
+
+ zone_change(ubc_info_zone, Z_NOENCRYPT, TRUE);
}
View
1  bsd/kern/uipc_socket.c
@@ -275,6 +275,7 @@ socketinit(void)
get_inpcb_str_size() + 4 + get_tcp_str_size());
so_cache_zone = zinit(str_size, 120000*str_size, 8192, "socache zone");
+ zone_change(so_cache_zone, Z_NOENCRYPT, TRUE);
#if TEMPDEBUG
printf("cached_sock_alloc -- so_cache_zone size is %x\n", str_size);
#endif
View
7 bsd/net/raw_usrreq.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2000-2007 Apple Inc. All rights reserved.
+ * Copyright (c) 2000-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
@@ -304,6 +304,11 @@ raw_usend(struct socket *so, int flags, struct mbuf *m,
goto release;
}
+ if (so->so_proto->pr_output == NULL) {
+ error = EOPNOTSUPP;
+ goto release;
+ }
+
if (control && control->m_len) {
error = EOPNOTSUPP;
goto release;
View
1  bsd/net/route.c
@@ -624,6 +624,7 @@ route_init(void)
panic("route_init: failed allocating rte_zone");
zone_change(rte_zone, Z_EXPAND, TRUE);
+ zone_change(rte_zone, Z_NOENCRYPT, TRUE);
TAILQ_INIT(&rttrash_head);
}
View
13 bsd/netinet/in_pcb.c
@@ -776,7 +776,7 @@ in_pcbconnect(struct inpcb *inp, struct sockaddr *nam, struct proc *p)
inp->inp_lport, 0, NULL);
socket_lock(inp->inp_socket, 0);
if (pcb != NULL) {
- in_pcb_checkstate(pcb, WNT_RELEASE, 0);
+ in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
return (EADDRINUSE);
}
if (inp->inp_laddr.s_addr == INADDR_ANY) {
@@ -1621,9 +1621,14 @@ inp_route_copyout(struct inpcb *inp, struct route *dst)
lck_mtx_assert(inp->inpcb_mtx, LCK_MTX_ASSERT_OWNED);
- /* Minor sanity check */
- if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET)
- panic("%s: wrong or corrupted route: %p", __func__, src);
+ /*
+ * If the route in the PCB is not for IPv4, blow it away;
+ * this is possible in the case of IPv4-mapped address case.
+ */
+ if (src->ro_rt != NULL && rt_key(src->ro_rt)->sa_family != AF_INET) {
+ rtfree(src->ro_rt);
+ src->ro_rt = NULL;
+ }
/* Copy everything (rt, dst, flags) from PCB */
bcopy(src, dst, sizeof (*dst));
View
25 bsd/netinet/ip_input.c
@@ -559,6 +559,7 @@ ip_proto_dispatch_in(
int seen = (inject_ipfref == 0);
int changed_header = 0;
struct ip *ip;
+ void (*pr_input)(struct mbuf *, int len);
if (!TAILQ_EMPTY(&ipv4_filters)) {
ipf_ref();
@@ -598,20 +599,21 @@ ip_proto_dispatch_in(
* otherwise let the protocol deal with its own locking
*/
ip = mtod(m, struct ip *);
-
+
if (changed_header) {
ip->ip_len = ntohs(ip->ip_len) - hlen;
ip->ip_off = ntohs(ip->ip_off);
}
-
- if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
+
+ if ((pr_input = ip_protox[ip->ip_p]->pr_input) == NULL) {
+ m_freem(m);
+ } else if (!(ip_protox[ip->ip_p]->pr_flags & PR_PROTOLOCK)) {
lck_mtx_lock(inet_domain_mutex);
- (*ip_protox[ip->ip_p]->pr_input)(m, hlen);
+ pr_input(m, hlen);
lck_mtx_unlock(inet_domain_mutex);
- }
- else
- (*ip_protox[ip->ip_p]->pr_input)(m, hlen);
-
+ } else {
+ pr_input(m, hlen);
+ }
}
/*
@@ -624,15 +626,16 @@ ip_input(struct mbuf *m)
struct ip *ip;
struct ipq *fp;
struct in_ifaddr *ia = NULL;
- int i, hlen, checkif;
+ int hlen, checkif;
u_short sum;
struct in_addr pkt_dst;
- u_int32_t div_info = 0; /* packet divert/tee info */
#if IPFIREWALL
+ int i;
+ u_int32_t div_info = 0; /* packet divert/tee info */
struct ip_fw_args args;
+ struct m_tag *tag;
#endif
ipfilter_t inject_filter_ref = 0;
- struct m_tag *tag;
#if IPFIREWALL
args.eh = NULL;
View
2  bsd/netinet/ip_mroute.c
@@ -77,7 +77,7 @@
#endif
-#ifndef MROUTING
+#if !MROUTING
extern u_int32_t _ip_mcast_src(int vifi);
extern int _ip_mforward(struct ip *ip, struct ifnet *ifp,
struct mbuf *m, struct ip_moptions *imo);
View
115 bsd/netinet/ip_output.c
@@ -3182,7 +3182,7 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
struct in_addr src = ip->ip_src;
struct in_addr dst = ip->ip_dst;
struct ifnet *rt_ifp;
- char s_src[16], s_dst[16];
+ char s_src[MAX_IPv4_STR_LEN], s_dst[MAX_IPv4_STR_LEN];
if (ip_select_srcif_debug) {
(void) inet_ntop(AF_INET, &src.s_addr, s_src, sizeof (s_src));
@@ -3222,6 +3222,22 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
ifa = (struct ifaddr *)ifa_foraddr_scoped(src.s_addr, scope);
+ if (ifa == NULL && ip->ip_p != IPPROTO_UDP &&
+ ip->ip_p != IPPROTO_TCP && ipforwarding) {
+ /*
+ * If forwarding is enabled, and if the packet isn't
+ * TCP or UDP, check if the source address belongs
+ * to one of our own interfaces; if so, demote the
+ * interface scope and do a route lookup right below.
+ */
+ ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
+ if (ifa != NULL) {
+ ifafree(ifa);
+ ifa = NULL;
+ ifscope = IFSCOPE_NONE;
+ }
+ }
+
if (ip_select_srcif_debug && ifa != NULL) {
if (ro->ro_rt != NULL) {
printf("%s->%s ifscope %d->%d ifa_if %s%d "
@@ -3251,6 +3267,103 @@ in_selectsrcif(struct ip *ip, struct route *ro, unsigned int ifscope)
if (ifa == NULL && ifscope == IFSCOPE_NONE) {
ifa = (struct ifaddr *)ifa_foraddr(src.s_addr);
+ /*
+ * If we have the IP address, but not the route, we don't
+ * really know whether or not it belongs to the correct
+ * interface (it could be shared across multiple interfaces.)
+ * The only way to find out is to do a route lookup.
+ */
+ if (ifa != NULL && ro->ro_rt == NULL) {
+ struct rtentry *rt;
+ struct sockaddr_in sin;
+ struct ifaddr *oifa = NULL;
+
+ bzero(&sin, sizeof (sin));
+ sin.sin_family = AF_INET;
+ sin.sin_len = sizeof (sin);
+ sin.sin_addr = dst;
+
+ lck_mtx_lock(rnh_lock);
+ if ((rt = rt_lookup(TRUE, (struct sockaddr *)&sin, NULL,
+ rt_tables[AF_INET], IFSCOPE_NONE)) != NULL) {
+ RT_LOCK(rt);
+ /*
+ * If the route uses a different interface,
+ * use that one instead. The IP address of
+ * the ifaddr that we pick up here is not
+ * relevant.
+ */
+ if (ifa->ifa_ifp != rt->rt_ifp) {
+ oifa = ifa;
+ ifa = rt->rt_ifa;
+ ifaref(ifa);
+ RT_UNLOCK(rt);
+ } else {
+ RT_UNLOCK(rt);
+ }
+ rtfree_locked(rt);
+ }
+ lck_mtx_unlock(rnh_lock);
+
+ if (oifa != NULL) {
+ struct ifaddr *iifa;
+
+ /*
+ * See if the interface pointed to by the
+ * route is configured with the source IP
+ * address of the packet.
+ */
+ iifa = (struct ifaddr *)ifa_foraddr_scoped(
+ src.s_addr, ifa->ifa_ifp->if_index);
+
+ if (iifa != NULL) {
+ /*
+ * Found it; drop the original one
+ * as well as the route interface
+ * address, and use this instead.
+ */
+ ifafree(oifa);
+ ifafree(ifa);
+ ifa = iifa;
+ } else if (!ipforwarding ||
+ (rt->rt_flags & RTF_GATEWAY)) {
+ /*
+ * This interface doesn't have that
+ * source IP address; drop the route
+ * interface address and just use the
+ * original one, and let the caller
+ * do a scoped route lookup.
+ */
+ ifafree(ifa);
+ ifa = oifa;
+ } else {
+ /*
+ * Forwarding is enabled and the source
+ * address belongs to one of our own
+ * interfaces which isn't the outgoing
+ * interface, and we have a route, and
+ * the destination is on a network that
+ * is directly attached (onlink); drop
+ * the original one and use the route
+ * interface address instead.
+ */
+ ifafree(oifa);
+ }
+ }
+ } else if (ifa != NULL && ro->ro_rt != NULL &&
+ !(ro->ro_rt->rt_flags & RTF_GATEWAY) &&
+ ifa->ifa_ifp != ro->ro_rt->rt_ifp && ipforwarding) {
+ /*
+ * Forwarding is enabled and the source address belongs
+ * to one of our own interfaces which isn't the same
+ * as the interface used by the known route; drop the
+ * original one and use the route interface address.
+ */
+ ifafree(ifa);
+ ifa = ro->ro_rt->rt_ifa;
+ ifaref(ifa);
+ }
+
if (ip_select_srcif_debug && ifa != NULL) {
printf("%s->%s ifscope %d ifa_if %s%d\n",
s_src, s_dst, ifscope, ifa->ifa_ifp->if_name,
View
2  bsd/netinet6/in6_pcb.c
@@ -464,7 +464,7 @@ in6_pcbconnect(inp, nam, p)
inp->inp_lport, 0, NULL);
socket_lock(inp->inp_socket, 0);
if (pcb != NULL) {
- in_pcb_checkstate(pcb, WNT_RELEASE, 0);
+ in_pcb_checkstate(pcb, WNT_RELEASE, pcb == inp ? 1 : 0);
return (EADDRINUSE);
}
if (IN6_IS_ADDR_UNSPECIFIED(&inp->in6p_laddr)) {
View
12 bsd/netinet6/in6_proto.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2008 Apple Inc. All rights reserved.
+ * Copyright (c) 2008-2010 Apple Inc. All rights reserved.
*
* @APPLE_OSREFERENCE_LICENSE_HEADER_START@
*
@@ -322,16 +322,6 @@ struct ip6protosw inet6sw[] = {
0, rip_unlock, 0,
{ 0, 0 }, NULL, { 0 }
},
-#else
-{ SOCK_RAW, &inet6domain, IPPROTO_PIM, PR_ATOMIC|PR_ADDR|PR_LASTHDR,
- 0, 0, 0, rip6_ctloutput,
- 0,
- 0, 0, 0, 0,
- 0,
- &rip6_usrreqs,
- 0, rip_unlock, 0,
- { 0, 0 }, NULL, { 0 }
-},
#endif
/* raw wildcard */
{ SOCK_RAW, &inet6domain, 0, PR_ATOMIC|PR_ADDR|PR_LASTHDR,
View
16 bsd/netinet6/ip6_input.c
@@ -955,7 +955,8 @@ ip6_input(m)
while (nxt != IPPROTO_DONE) {
struct ipfilter *filter;
-
+ int (*pr_input)(struct mbuf **, int *);
+
if (ip6_hdrnestlimit && (++nest > ip6_hdrnestlimit)) {
ip6stat.ip6s_toomanyhdr++;
goto badunlocked;
@@ -1028,13 +1029,18 @@ ip6_input(m)
}
ipf_unref();
}
- if (!(ip6_protox[nxt]->pr_flags & PR_PROTOLOCK)) {
+
+ if ((pr_input = ip6_protox[nxt]->pr_input) == NULL) {
+ m_freem(m);
+ m = NULL;
+ nxt = IPPROTO_DONE;
+ } else if (!(ip6_protox[nxt]->pr_flags & PR_PROTOLOCK)) {
lck_mtx_lock(inet6_domain_mutex);
- nxt = (*ip6_protox[nxt]->pr_input)(&m, &off);
+ nxt = pr_input(&m, &off);
lck_mtx_unlock(inet6_domain_mutex);
+ } else {
+ nxt = pr_input(&m, &off);
}
- else
- nxt = (*ip6_protox[nxt]->pr_input)(&m, &off);
}
return;
bad:
View
2  bsd/netinet6/ip6_var.h
@@ -300,7 +300,7 @@ extern int ip6_neighborgcthresh; /* Threshold # of NDP entries for GC */
extern int ip6_maxifprefixes; /* Max acceptable prefixes via RA per IF */
extern int ip6_maxifdefrouters; /* Max acceptable def routers via RA */
extern int ip6_maxdynroutes; /* Max # of routes created via redirect */
-#ifdef MROUTING
+#if MROUTING
extern struct socket *ip6_mrouter; /* multicast routing daemon */
#endif
extern int ip6_sendredirects; /* send IP redirects when forwarding? */
View
7 bsd/netinet6/nd6.c
@@ -1193,10 +1193,11 @@ nd6_lookup(
* use rt->rt_ifa->ifa_ifp, which would specify the REAL
* interface.
*/
- if (((ifp && (ifp->if_type != IFT_PPP)) && ((ifp->if_eflags & IFEF_NOAUTOIPV6LL) == 0)) &&
- ((rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
+ if (ifp == NULL || (ifp->if_type == IFT_PPP) ||
+ (ifp->if_eflags & IFEF_NOAUTOIPV6LL) ||
+ (rt->rt_flags & RTF_GATEWAY) || (rt->rt_flags & RTF_LLINFO) == 0 ||
rt->rt_gateway->sa_family != AF_LINK || rt->rt_llinfo == NULL ||
- (ifp && rt->rt_ifa->ifa_ifp != ifp))) {
+ (ifp && rt->rt_ifa->ifa_ifp != ifp)) {
RT_REMREF_LOCKED(rt);
RT_UNLOCK(rt);
if (create) {
View
5 bsd/nfs/nfs_socket.c
@@ -3420,6 +3420,7 @@ nfs_noremotehang(thread_t thd)
int
nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocked)
{
+ proc_t p;
int error = 0;
if (nmp == NULL)
@@ -3468,8 +3469,8 @@ nfs_sigintr(struct nfsmount *nmp, struct nfsreq *req, thread_t thd, int nmplocke
return (EINTR);
/* mask off thread and process blocked signals. */
- if ((nmp->nm_flag & NFSMNT_INT) &&
- proc_pendingsignals(get_bsdthreadtask_info(thd), NFSINT_SIGMASK))
+ if ((nmp->nm_flag & NFSMNT_INT) && ((p = get_bsdthreadtask_info(thd))) &&
+ proc_pendingsignals(p, NFSINT_SIGMASK))
return (EINTR);
return (0);
}
View
1  bsd/sys/disk.h
@@ -141,6 +141,7 @@ typedef struct
#define DKIOCGETBLOCKCOUNT32 _IOR('d', 25, uint32_t)
#define DKIOCSETBLOCKSIZE _IOW('d', 24, uint32_t)
#define DKIOCGETBSDUNIT _IOR('d', 27, uint32_t)
+#define DKIOCISSOLIDSTATE _IOR('d', 79, uint32_t)
#define DKIOCISVIRTUAL _IOR('d', 72, uint32_t)
#define DKIOCGETBASE _IOR('d', 73, uint64_t)
#define DKIOCGETFEATURES _IOR('d', 76, uint32_t)
View
1  bsd/sys/kdebug.h
@@ -198,6 +198,7 @@ __BEGIN_DECLS
#define DBG_IOINFINIBAND 48 /* Infiniband */
#define DBG_IOCPUPM 49 /* CPU Power Management */
#define DBG_IOGRAPHICS 50 /* Graphics */
+#define DBG_HIBERNATE 51 /* hibernation related events */
/* Backwards compatibility */
#define DBG_IOPOINTING DBG_IOHID /* OBSOLETE: Use DBG_IOHID instead */
View
1  bsd/sys/mount_internal.h
@@ -239,6 +239,7 @@ extern struct mount * dead_mountp;
#define MNTK_LOCK_LOCAL 0x00100000 /* advisory locking is done above the VFS itself */
#define MNTK_VIRTUALDEV 0x00200000 /* mounted on a virtual device i.e. a disk image */
#define MNTK_ROOTDEV 0x00400000 /* this filesystem resides on the same device as the root */
+#define MNTK_SSD 0x00800000 /* underlying device is of the solid state variety */
#define MNTK_UNMOUNT 0x01000000 /* unmount in progress */
#define MNTK_MWAIT 0x02000000 /* waiting for unmount to finish */
#define MNTK_WANTRDWR 0x04000000 /* upgrade to read/write requested */
View
10 bsd/vfs/vfs_bio.c
@@ -125,7 +125,7 @@ static void buf_reassign(buf_t bp, vnode_t newvp);
static errno_t buf_acquire_locked(buf_t bp, int flags, int slpflag, int slptimeo);
static int buf_iterprepare(vnode_t vp, struct buflists *, int flags);
static void buf_itercomplete(vnode_t vp, struct buflists *, int flags);
-boolean_t buffer_cache_gc(void);
+boolean_t buffer_cache_gc(int);
__private_extern__ int bdwrite_internal(buf_t, int);
@@ -3676,12 +3676,16 @@ brecover_data(buf_t bp)
}
boolean_t
-buffer_cache_gc(void)
+buffer_cache_gc(int all)
{
buf_t bp;
boolean_t did_large_zfree = FALSE;
int now = buf_timestamp();
uint32_t count = 0;
+ int thresh_hold = BUF_STALE_THRESHHOLD;
+
+ if (all)
+ thresh_hold = 0;
lck_mtx_lock_spin(buf_mtxp);
@@ -3689,7 +3693,7 @@ buffer_cache_gc(void)
bp = TAILQ_FIRST(&bufqueues[BQ_META]);
/* Only collect buffers unused in the last N seconds. Note: ordered by timestamp. */
- while ((bp != NULL) && ((now - bp->b_timestamp) > BUF_STALE_THRESHHOLD) && (count < BUF_MAX_GC_COUNT)) {
+ while ((bp != NULL) && ((now - bp->b_timestamp) > thresh_hold) && (all || (count < BUF_MAX_GC_COUNT))) {
int result, size;
boolean_t is_zalloc;
View
17 bsd/vfs/vfs_cluster.c
@@ -3780,7 +3780,6 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type,
int force_data_sync;
int retval = 0;
int no_zero_fill = 0;
- int abort_flag = 0;
int io_flag = 0;
int misaligned = 0;
struct clios iostate;
@@ -3991,13 +3990,11 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type,
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_START,
(int)upl_offset, upl_needed_size, (int)iov_base, io_size, 0);
- if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0)) {
+ if (upl_offset == 0 && ((io_size & PAGE_MASK) == 0))
no_zero_fill = 1;
- abort_flag = UPL_ABORT_DUMP_PAGES | UPL_ABORT_FREE_ON_EMPTY;
- } else {
+ else
no_zero_fill = 0;
- abort_flag = UPL_ABORT_FREE_ON_EMPTY;
- }
+
for (force_data_sync = 0; force_data_sync < 3; force_data_sync++) {
pages_in_pl = 0;
upl_size = upl_needed_size;
@@ -4028,13 +4025,13 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type,
pl = UPL_GET_INTERNAL_PAGE_LIST(upl);
for (i = 0; i < pages_in_pl; i++) {
- if (!upl_valid_page(pl, i))
+ if (!upl_page_present(pl, i))
break;
}
if (i == pages_in_pl)
break;
- ubc_upl_abort(upl, abort_flag);
+ ubc_upl_abort(upl, 0);
}
if (force_data_sync >= 3) {
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
@@ -4052,7 +4049,7 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type,
io_size = 0;
}
if (io_size == 0) {
- ubc_upl_abort(upl, abort_flag);
+ ubc_upl_abort(upl, 0);
goto wait_for_dreads;
}
KERNEL_DEBUG((FSDBG_CODE(DBG_FSRW, 72)) | DBG_FUNC_END,
@@ -4100,7 +4097,7 @@ cluster_read_direct(vnode_t vp, struct uio *uio, off_t filesize, int *read_type,
* go wait for any other reads to complete before
* returning the error to the caller
*/
- ubc_upl_abort(upl, abort_flag);
+ ubc_upl_abort(upl, 0);
goto wait_for_dreads;
}
View
229 bsd/vfs/vfs_subr.c
@@ -2811,7 +2811,7 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp)
u_int64_t temp;
u_int32_t features;
vfs_context_t ctx = vfs_context_current();
-
+ int isssd = 0;
int isvirtual = 0;
/*
* determine if this mount point exists on the same device as the root
@@ -2860,6 +2860,10 @@ vfs_init_io_attributes(vnode_t devvp, mount_t mp)
if (isvirtual)
mp->mnt_kern_flag |= MNTK_VIRTUALDEV;
}
+ if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, ctx) == 0) {
+ if (isssd)
+ mp->mnt_kern_flag |= MNTK_SSD;
+ }
if ((error = VNOP_IOCTL(devvp, DKIOCGETFEATURES,
(caddr_t)&features, 0, ctx)))
@@ -4927,8 +4931,24 @@ vauth_node_owner(struct vnode_attr *vap, kauth_cred_t cred)
return(result);
}
+/*
+ * vauth_node_group
+ *
+ * Description: Ask if a cred is a member of the group owning the vnode object
+ *
+ * Parameters: vap vnode attribute
+ * vap->va_gid group owner of vnode object
+ * cred credential to check
+ * ismember pointer to where to put the answer
+ * idontknow Return this if we can't get an answer
+ *
+ * Returns: 0 Success
+ * idontknow Can't get information
+ * kauth_cred_ismember_gid:? Error from kauth subsystem
+ * kauth_cred_ismember_gid:? Error from kauth subsystem
+ */
static int
-vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember)
+vauth_node_group(struct vnode_attr *vap, kauth_cred_t cred, int *ismember, int