@@ -173,7 +173,7 @@ def write_stream_to_pack(read, write, zstream, base_crc=None):
173173class IndexWriter (object ):
174174 """Utility to cache index information, allowing to write all information later
175175 in one go to the given stream
176- :note: currently only writes v2 indices"""
176+ **Note:** currently only writes v2 indices"""
177177 __slots__ = '_objs'
178178
179179 def __init__ (self ):
@@ -391,7 +391,8 @@ def indexfile_checksum(self):
391391
392392 def offsets (self ):
393393 """:return: sequence of all offsets in the order in which they were written
394- :note: return value can be random accessed, but may be immmutable"""
394+
395+ **Note:** return value can be random accessed, but may be immmutable"""
395396 if self ._version == 2 :
396397 # read stream to array, convert to tuple
397398 a = array .array ('I' ) # 4 byte unsigned int, long are 8 byte on 64 bit it appears
@@ -497,10 +498,10 @@ class PackFile(LazyMixin):
497498 packs therefor is 32 bit on 32 bit systems. On 64 bit systems, this should be
498499 fine though.
499500
500- :note: at some point, this might be implemented using streams as well, or
501- streams are an alternate path in the case memory maps cannot be created
502- for some reason - one clearly doesn't want to read 10GB at once in that
503- case"""
501+ **Note:** at some point, this might be implemented using streams as well, or
502+ streams are an alternate path in the case memory maps cannot be created
503+ for some reason - one clearly doesn't want to read 10GB at once in that
504+ case"""
504505
505506 __slots__ = ('_packpath' , '_cursor' , '_size' , '_version' )
506507 pack_signature = 0x5041434b # 'PACK'
@@ -625,8 +626,9 @@ def stream_iter(self, start_offset=0):
625626 to access the data in the pack directly.
626627 :param start_offset: offset to the first object to iterate. If 0, iteration
627628 starts at the very first object in the pack.
628- :note: Iterating a pack directly is costly as the datastream has to be decompressed
629- to determine the bounds between the objects"""
629+
630+ **Note:** Iterating a pack directly is costly as the datastream has to be decompressed
631+ to determine the bounds between the objects"""
630632 return self ._iter_objects (start_offset , as_stream = True )
631633
632634 #} END Read-Database like Interface
@@ -902,9 +904,11 @@ def write_pack(cls, object_iter, pack_write, index_write=None,
902904 :param zlib_compression: the zlib compression level to use
903905 :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
904906 and over all contents of the index. If index_write was None, index_binsha will be None
905- :note: The destination of the write functions is up to the user. It could
906- be a socket, or a file for instance
907- :note: writes only undeltified objects"""
907+
908+ **Note:** The destination of the write functions is up to the user. It could
909+ be a socket, or a file for instance
910+
911+ **Note:** writes only undeltified objects"""
908912 objs = object_iter
909913 if not object_count :
910914 if not isinstance (object_iter , (tuple , list )):
@@ -979,7 +983,8 @@ def create(cls, object_iter, base_dir, object_count = None, zlib_compression = z
979983 and corresponding index file. The pack contains all OStream objects contained in object iter.
980984 :param base_dir: directory which is to contain the files
981985 :return: PackEntity instance initialized with the new pack
982- :note: for more information on the other parameters see the write_pack method"""
986+
987+ **Note:** for more information on the other parameters see the write_pack method"""
983988 pack_fd , pack_path = tempfile .mkstemp ('' , 'pack' , base_dir )
984989 index_fd , index_path = tempfile .mkstemp ('' , 'index' , base_dir )
985990 pack_write = lambda d : os .write (pack_fd , d )
0 commit comments