@@ -62,7 +62,7 @@ class DecompressMemMapReader(LazyMixin):
         hence we try to find a good tradeoff between allocation time and number of
         times we actually allocate. An own zlib implementation would be good here
         to better support streamed reading - it would only need to keep the mmap
-        and decompress it into chunks, thats all ... """
+        and decompress it into chunks, that's all ... """
     __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
                  '_cbr', '_phi')

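The docstring hunk above motivates a zlib reader that works chunk-wise over the memory map instead of inflating everything at once. As a point of reference only, a minimal sketch of that idea with the standard zlib module could look roughly like this; the function name, chunk size, and looping strategy are illustrative and are not part of this change:

# Illustrative sketch only: decompress a zlib stream stored in a memory map
# (or any bytes-like buffer) in bounded chunks.
import zlib

def iter_inflated_chunks(buf, chunk_size=4096):
    """Yield decompressed pieces of at most chunk_size bytes from buf."""
    dco = zlib.decompressobj()
    offset = 0
    while offset < len(buf) and not dco.eof:
        piece = buf[offset:offset + chunk_size]
        offset += chunk_size
        # max_length bounds the output per call; input that was not consumed
        # yet is kept in dco.unconsumed_tail and fed back in below.
        data = dco.decompress(piece, chunk_size)
        while data:
            yield data
            data = dco.decompress(dco.unconsumed_tail, chunk_size)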
@@ -128,7 +128,7 @@ def new(self, m, close_on_deletion=False):
         This method parses the object header from m and returns the parsed
         type and size, as well as the created stream instance.

-        :param m: memory map on which to oparate. It must be object data ( header + contents )
+        :param m: memory map on which to operate. It must be object data ( header + contents )
         :param close_on_deletion: if True, the memory map will be closed once we are
             being deleted"""
         inst = DecompressMemMapReader(m, close_on_deletion, 0)
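Going purely by the docstring shown in this hunk, a caller hands new() a memory map of the object data and gets back the parsed type and size plus the stream. A hedged usage sketch; the (type, size, stream) return order is inferred from the wording above, and the file path is made up:

# Hedged usage sketch; return shape is inferred from the docstring above,
# and the object path is purely illustrative.
import mmap

with open('some/object/file', 'rb') as fp:
    m = mmap.mmap(fp.fileno(), 0, access=mmap.ACCESS_READ)
    obj_type, size, stream = DecompressMemMapReader.new(m, close_on_deletion=True)
    contents = stream.read(size)   # decompressed object payload, header excluded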
@@ -175,7 +175,7 @@ def compressed_bytes_read(self):
         # Only scrub the stream forward if we are officially done with the
         # bytes we were to have.
         if self._br == self._s and not self._zip.unused_data:
-            # manipulate the bytes-read to allow our own read method to coninute
+            # manipulate the bytes-read to allow our own read method to continue
             # but keep the window at its current position
             self._br = 0
             if hasattr(self._zip, 'status'):
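For context on the unused_data check in this hunk: a zlib decompress object leaves unused_data empty until it has consumed the end of the compressed stream; only then do any trailing bytes show up there, which is why the code above may still need to scrub forward even after all expected payload bytes were read. A small, self-contained illustration (payload and slicing are arbitrary):

# Demonstrates the zlib behaviour the check above relies on: unused_data stays
# empty until the decompressor has actually read past the end of the stream.
import os
import zlib

payload = os.urandom(1024)                   # incompressible, so the stream is long
blob = zlib.compress(payload) + b'trailer'   # extra bytes appended after the stream

dco = zlib.decompressobj()
dco.decompress(blob[:20])                    # only part of the stream so far
assert dco.unused_data == b''                # end of stream not reached yet
dco.decompress(blob[20:])                    # rest of the stream plus the trailer
assert dco.unused_data == b'trailer'         # bytes past the stream end land here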