# (removed: GitHub web-page chrome — "Skip to content", branch selector,
#  etc. — that was accidentally captured along with this source file)
# Copyright (C) 2007-2016 Giampaolo Rodola' <g.rodola@gmail.com>.
# Use of this source code is governed by MIT license that can be
# found in the LICENSE file.
import asynchat
import contextlib
import errno
import glob
import logging
import os
import random
import socket
import sys
import time
import traceback
import warnings
try:
import pwd
import grp
except ImportError:
pwd = grp = None
from . import __ver__
from ._compat import b
from ._compat import getcwdu
from ._compat import PY3
from ._compat import u
from ._compat import unicode
from ._compat import xrange
from .authorizers import AuthenticationFailed
from .authorizers import AuthorizerError
from .authorizers import DummyAuthorizer
from .filesystems import AbstractedFS
from .filesystems import FilesystemError
from .ioloop import _ERRNOS_DISCONNECTED
from .ioloop import _ERRNOS_RETRY
from .ioloop import Acceptor
from .ioloop import AsyncChat
from .ioloop import Connector
from .ioloop import RetryError
from .ioloop import timer
from .log import debug
from .log import logger
def _import_sendfile():
# By default attempt to use os.sendfile introduced in Python 3.3:
# http://bugs.python.org/issue10882
# ...otherwise fallback on using third-party pysendfile module:
# https://github.com/giampaolo/pysendfile/
if os.name == 'posix':
try:
return os.sendfile # py >= 3.3
except AttributeError:
try:
import sendfile as sf
# dirty hack to detect whether old 1.2.4 version is installed
if hasattr(sf, 'has_sf_hdtr'):
raise ImportError
return sf.sendfile
except ImportError:
pass
# The sendfile(2) implementation in use: os.sendfile, pysendfile's
# sendfile, or None when unavailable on this platform.
sendfile = _import_sendfile()

# Table of all FTP commands this server understands. Each entry maps the
# command name to:
#   perm: authorizer permission character required (None == no permission)
#   auth: whether the client must be authenticated before issuing it
#   arg:  whether an argument is mandatory (True), forbidden (False)
#         or optional (None)
#   help: the text returned in reply to HELP <cmd>
proto_cmds = {
    'ABOR': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: ABOR (abort transfer).'),
    'ALLO': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: ALLO <SP> bytes (noop; allocate storage).'),
    'APPE': dict(
        perm='a', auth=True, arg=True,
        help='Syntax: APPE <SP> file-name (append data to file).'),
    'CDUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: CDUP (go to parent directory).'),
    'CWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: CWD [<SP> dir-name] (change working directory).'),
    'DELE': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: DELE <SP> file-name (delete file).'),
    'EPRT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: EPRT <SP> |proto|ip|port| (extended active mode).'),
    'EPSV': dict(
        perm=None, auth=True, arg=None,
        help='Syntax: EPSV [<SP> proto/"ALL"] (extended passive mode).'),
    'FEAT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: FEAT (list all new features supported).'),
    'HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: HELP [<SP> cmd] (show help).'),
    'LIST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: LIST [<SP> path] (list files).'),
    'MDTM': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: MDTM [<SP> path] (file last modification time).'),
    'MLSD': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLSD [<SP> path] (list directory).'),
    'MLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: MLST [<SP> path] (show information about path).'),
    'MODE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: MODE <SP> mode (noop; set data transfer mode).'),
    'MKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: MKD <SP> path (create directory).'),
    'NLST': dict(
        perm='l', auth=True, arg=None,
        help='Syntax: NLST [<SP> path] (list path in a compact form).'),
    'NOOP': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: NOOP (just do nothing).'),
    'OPTS': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: OPTS <SP> cmd [<SP> option] (set option for command).'),
    'PASS': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: PASS [<SP> password] (set user password).'),
    'PASV': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PASV (open passive data connection).'),
    'PORT': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: PORT <sp> h,h,h,h,p,p (open active data connection).'),
    'PWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: PWD (get current working directory).'),
    'QUIT': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: QUIT (quit current session).'),
    'REIN': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: REIN (flush account).'),
    'REST': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: REST <SP> offset (set file offset).'),
    'RETR': dict(
        perm='r', auth=True, arg=True,
        help='Syntax: RETR <SP> file-name (retrieve a file).'),
    'RMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: RMD <SP> dir-name (remove directory).'),
    'RNFR': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNFR <SP> file-name (rename (source name)).'),
    'RNTO': dict(
        perm='f', auth=True, arg=True,
        help='Syntax: RNTO <SP> file-name (rename (destination name)).'),
    'SITE': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: SITE <SP> site-command (execute SITE command).'),
    'SITE HELP': dict(
        perm=None, auth=False, arg=None,
        help='Syntax: SITE HELP [<SP> cmd] (show SITE command help).'),
    'SITE CHMOD': dict(
        perm='M', auth=True, arg=True,
        help='Syntax: SITE CHMOD <SP> mode path (change file mode).'),
    'SIZE': dict(
        perm='l', auth=True, arg=True,
        help='Syntax: SIZE <SP> file-name (get file size).'),
    'STAT': dict(
        perm='l', auth=False, arg=None,
        help='Syntax: STAT [<SP> path name] (server stats [list files]).'),
    'STOR': dict(
        perm='w', auth=True, arg=True,
        help='Syntax: STOR <SP> file-name (store a file).'),
    'STOU': dict(
        perm='w', auth=True, arg=None,
        help='Syntax: STOU [<SP> name] (store a file with a unique name).'),
    'STRU': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: STRU <SP> type (noop; set file structure).'),
    'SYST': dict(
        perm=None, auth=False, arg=False,
        help='Syntax: SYST (get operating system type).'),
    'TYPE': dict(
        perm=None, auth=True, arg=True,
        help='Syntax: TYPE <SP> [A | I] (set transfer type).'),
    'USER': dict(
        perm=None, auth=False, arg=True,
        help='Syntax: USER <SP> user-name (set username).'),
    'XCUP': dict(
        perm='e', auth=True, arg=False,
        help='Syntax: XCUP (obsolete; go to parent directory).'),
    'XCWD': dict(
        perm='e', auth=True, arg=None,
        help='Syntax: XCWD [<SP> dir-name] (obsolete; change directory).'),
    'XMKD': dict(
        perm='m', auth=True, arg=True,
        help='Syntax: XMKD <SP> dir-name (obsolete; create directory).'),
    'XPWD': dict(
        perm=None, auth=True, arg=False,
        help='Syntax: XPWD (obsolete; get current dir).'),
    'XRMD': dict(
        perm='d', auth=True, arg=True,
        help='Syntax: XRMD <SP> dir-name (obsolete; remove directory).'),
}

# SITE CHMOD is advertised only on platforms providing os.chmod()
if not hasattr(os, 'chmod'):
    del proto_cmds['SITE CHMOD']
def _strerror(err):
if isinstance(err, EnvironmentError):
try:
return os.strerror(err.errno)
except AttributeError:
# not available on PythonCE
if not hasattr(os, 'strerror'):
return err.strerror
raise
else:
return str(err)
def _support_hybrid_ipv6():
"""Return True if it is possible to use hybrid IPv6/IPv4 sockets
on this platform.
"""
# Note: IPPROTO_IPV6 constant is broken on Windows, see:
# http://bugs.python.org/issue6926
try:
if not socket.has_ipv6:
return False
with contextlib.closing(socket.socket(socket.AF_INET6)) as sock:
return not sock.getsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY)
except (socket.error, AttributeError):
return False
SUPPORTS_HYBRID_IPV6 = _support_hybrid_ipv6()
class _FileReadWriteError(OSError):
"""Exception raised when reading or writing a file during a transfer."""
class _GiveUpOnSendfile(Exception):
"""Exception raised in case use of sendfile() fails on first try,
in which case send() will be used.
"""
# --- DTP classes


class PassiveDTP(Acceptor):
    """Creates a socket listening on a local port, dispatching the
    resultant connection to DTPHandler. Used for handling PASV command.

     - (int) timeout: the timeout for a remote client to establish
       connection with the listening socket. Defaults to 30 seconds.

     - (int) backlog: the maximum number of queued connections passed
       to listen(). If a connection request arrives when the queue is
       full the client may raise ECONNRESET. Defaults to 5.
    """
    timeout = 30
    backlog = None

    def __init__(self, cmd_channel, extmode=False):
        """Initialize the passive data server.

        - (instance) cmd_channel: the command channel class instance.
        - (bool) extmode: whether to use the extended passive mode (EPSV,
          229) response type instead of the classic PASV (227) one.
        """
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        Acceptor.__init__(self, ioloop=cmd_channel.ioloop)

        # Figure out which IP to advertise in the PASV reply: a
        # per-address masquerade mapping wins over the global
        # masquerade_address, which wins over the real local IP.
        local_ip = self.cmd_channel.socket.getsockname()[0]
        if local_ip in self.cmd_channel.masquerade_address_map:
            masqueraded_ip = self.cmd_channel.masquerade_address_map[local_ip]
        elif self.cmd_channel.masquerade_address:
            masqueraded_ip = self.cmd_channel.masquerade_address
        else:
            masqueraded_ip = None

        if self.cmd_channel.server.socket.family != socket.AF_INET:
            # dual stack IPv4/IPv6 support
            af = self.bind_af_unspecified((local_ip, 0))
            self.socket.close()
        else:
            af = self.cmd_channel.socket.family

        self.create_socket(af, socket.SOCK_STREAM)

        if self.cmd_channel.passive_ports is None:
            # By using 0 as port number value we let kernel choose a
            # free unprivileged random port.
            self.bind((local_ip, 0))
        else:
            # Try the configured passive ports in random order until
            # one binds successfully.
            ports = list(self.cmd_channel.passive_ports)
            while ports:
                port = ports.pop(random.randint(0, len(ports) - 1))
                self.set_reuse_addr()
                try:
                    self.bind((local_ip, port))
                except socket.error as err:
                    if err.errno == errno.EADDRINUSE:  # port already in use
                        if ports:
                            continue
                        # If cannot use one of the ports in the configured
                        # range we'll use a kernel-assigned port, and log
                        # a message reporting the issue.
                        # By using 0 as port number value we let kernel
                        # choose a free unprivileged random port.
                        else:
                            self.bind((local_ip, 0))
                            self.cmd_channel.log(
                                "Can't find a valid passive port in the "
                                "configured range. A random kernel-assigned "
                                "port will be used.",
                                logfun=logger.warning
                            )
                    else:
                        raise
                else:
                    break

        self.listen(self.backlog or self.cmd_channel.server.backlog)
        port = self.socket.getsockname()[1]
        if not extmode:
            ip = masqueraded_ip or local_ip
            if ip.startswith('::ffff:'):
                # In this scenario, the server has an IPv6 socket, but
                # the remote client is using IPv4 and its address is
                # represented as an IPv4-mapped IPv6 address which
                # looks like this ::ffff:151.12.5.65, see:
                # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
                # http://tools.ietf.org/html/rfc3493.html#section-3.7
                # We truncate the first bytes to make it look like a
                # common IPv4 address.
                ip = ip[7:]
            # The format of the 227 response is not standardized.
            # This is the most expected:
            resp = '227 Entering passive mode (%s,%d,%d).' % (
                ip.replace('.', ','), port // 256, port % 256)
            self.cmd_channel.respond(resp)
        else:
            self.cmd_channel.respond('229 Entering extended passive mode '
                                     '(|||%d|).' % port)
        if self.timeout:
            self.call_later(self.timeout, self.handle_timeout)

    # --- connection / overridden

    def handle_accepted(self, sock, addr):
        """Called when remote client initiates a connection."""
        if not self.cmd_channel.connected:
            return self.close()

        # Check the origin of data connection. If not expressively
        # configured we drop the incoming data connection if remote
        # IP address does not match the client's IP address.
        if self.cmd_channel.remote_ip != addr[0]:
            if not self.cmd_channel.permit_foreign_addresses:
                try:
                    sock.close()
                except socket.error:
                    pass
                msg = '425 Rejected data connection from foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.respond_w_warning(msg)
                # do not close listening socket: it couldn't be client's blame
                return
            else:
                # site-to-site FTP allowed
                msg = 'Established data connection with foreign address ' \
                      '%s:%s.' % (addr[0], addr[1])
                self.cmd_channel.log(msg, logfun=logger.warning)
        # Immediately close the current channel (we accept only one
        # connection at time) and avoid running out of max connections
        # limit.
        self.close()
        # delegate such connection to DTP handler
        if self.cmd_channel.connected:
            handler = self.cmd_channel.dtp_handler(sock, self.cmd_channel)
            if handler.connected:
                self.cmd_channel.data_channel = handler
                self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        """Called when the client did not connect within self.timeout
        seconds; reply 421 on the control channel and stop listening.
        """
        if self.cmd_channel.connected:
            self.cmd_channel.respond("421 Passive data channel timed out.",
                                     logfun=logging.info)
        self.close()

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except Exception:
            logger.error(traceback.format_exc())
        try:
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        debug("call: close()", inst=self)
        Acceptor.close(self)
class ActiveDTP(Connector):
    """Connects to remote client and dispatches the resulting connection
    to DTPHandler. Used for handling PORT command.

     - (int) timeout: the timeout for us to establish connection with
       the client's listening data socket.
    """
    timeout = 30

    def __init__(self, ip, port, cmd_channel):
        """Initialize the active data channel attempting to connect
        to remote data socket.

        - (str) ip: the remote IP address.
        - (int) port: the remote port.
        - (instance) cmd_channel: the command channel class instance.
        """
        Connector.__init__(self, ioloop=cmd_channel.ioloop)
        self.cmd_channel = cmd_channel
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._idler = None
        if self.timeout:
            self._idler = self.ioloop.call_later(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)
        # Label the originating command for logging purposes. A dotted-quad
        # IPv4 address contains exactly 3 dots (e.g. "151.12.5.65"); the
        # previous "== 4" test could never match, so IPv4 connections were
        # always mislabeled as "EPRT".
        if ip.count('.') == 3:
            self._cmd = "PORT"
            self._normalized_addr = "%s:%s" % (ip, port)
        else:
            self._cmd = "EPRT"
            self._normalized_addr = "[%s]:%s" % (ip, port)

        source_ip = self.cmd_channel.socket.getsockname()[0]
        # dual stack IPv4/IPv6 support
        try:
            self.connect_af_unspecified((ip, port), (source_ip, 0))
        except (socket.gaierror, socket.error):
            self.handle_close()

    def readable(self):
        # We are only interested in the (writable) connect event.
        return False

    def handle_write(self):
        # overridden to prevent unhandled read/write event messages to
        # be printed by asyncore on Python < 2.6
        pass

    def handle_connect(self):
        """Called when connection is established."""
        self.del_channel()
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()
        if not self.cmd_channel.connected:
            return self.close()
        # fix for asyncore on python < 2.6, meaning we aren't
        # actually connected.
        # test_active_conn_error tests this condition
        err = self.socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err != 0:
            raise socket.error(err)
        #
        msg = 'Active data connection established.'
        self.cmd_channel.respond('200 ' + msg)
        self.cmd_channel.log_cmd(self._cmd, self._normalized_addr, 200, msg)
        #
        if not self.cmd_channel.connected:
            return self.close()
        # delegate such connection to DTP handler
        handler = self.cmd_channel.dtp_handler(self.socket, self.cmd_channel)
        self.cmd_channel.data_channel = handler
        self.cmd_channel._on_dtp_connection()

    def handle_timeout(self):
        """Called when we could not connect to the client's data socket
        within self.timeout seconds; reply 421 and give up.
        """
        if self.cmd_channel.connected:
            msg = "Active data channel timed out."
            self.cmd_channel.respond("421 " + msg, logfun=logger.info)
            self.cmd_channel.log_cmd(
                self._cmd, self._normalized_addr, 421, msg)
        self.close()

    def handle_close(self):
        # With the new IO loop, handle_close() gets called in case
        # the fd appears in the list of exceptional fds.
        # This means connect() failed.
        if not self._closed:
            self.close()
            if self.cmd_channel.connected:
                msg = "Can't connect to specified address."
                self.cmd_channel.respond("425 " + msg)
                self.cmd_channel.log_cmd(
                    self._cmd, self._normalized_addr, 425, msg)

    def handle_error(self):
        """Called to handle any uncaught exceptions."""
        try:
            raise
        except (socket.gaierror, socket.error):
            pass
        except Exception:
            self.log_exception(self)
        try:
            self.handle_close()
        except Exception:
            logger.critical(traceback.format_exc())

    def close(self):
        debug("call: close()", inst=self)
        if not self._closed:
            Connector.close(self)
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
class DTPHandler(AsyncChat):
    """Class handling server-data-transfer-process (server-DTP, see
    RFC-959) managing data-transfer operations involving sending
    and receiving data.

    Class attributes:

     - (int) timeout: the timeout which roughly is the maximum time we
       permit data transfers to stall for with no progress. If the
       timeout triggers, the remote client will be kicked off
       (defaults 300).

     - (int) ac_in_buffer_size: incoming data buffer size (defaults 65536)

     - (int) ac_out_buffer_size: outgoing data buffer size (defaults 65536)
    """
    timeout = 300
    ac_in_buffer_size = 65536
    ac_out_buffer_size = 65536

    def __init__(self, sock, cmd_channel):
        """Initialize the data channel.

        - (instance) sock: the socket object instance of the newly
          established connection.
        - (instance) cmd_channel: the command channel class instance.
        """
        self.cmd_channel = cmd_channel
        # file object being sent/received, set later by the FTP handler
        self.file_obj = None
        # True while the channel is used for receiving (STOR/APPE)
        self.receive = False
        self.transfer_finished = False
        self.tot_bytes_sent = 0
        self.tot_bytes_received = 0
        # the FTP command (e.g. 'RETR') this transfer belongs to
        self.cmd = None
        self.log = cmd_channel.log
        self.log_exception = cmd_channel.log_exception
        self._data_wrapper = None
        # bytes transmitted as of the last handle_timeout() check
        self._lastdata = 0
        # True if the previous ASCII chunk ended with a lone '\r'
        self._had_cr = False
        self._start_time = timer()
        # (response-string, logfun) pair to send on close(), if any
        self._resp = ()
        self._offset = None
        self._filefd = None
        self._idler = None
        self._initialized = False
        try:
            AsyncChat.__init__(self, sock, ioloop=cmd_channel.ioloop)
        except socket.error as err:
            # if we get an exception here we want the dispatcher
            # instance to set socket attribute before closing, see:
            # https://github.com/giampaolo/pyftpdlib/issues/188
            AsyncChat.__init__(
                self, socket.socket(), ioloop=cmd_channel.ioloop)
            # https://github.com/giampaolo/pyftpdlib/issues/143
            self.close()
            if err.errno == errno.EINVAL:
                return
            self.handle_error()
            return

        # remove this instance from IOLoop's socket map
        if not self.connected:
            self.close()
            return
        if self.timeout:
            self._idler = self.ioloop.call_every(self.timeout,
                                                 self.handle_timeout,
                                                 _errback=self.handle_error)

    def __repr__(self):
        try:
            addr = "%s:%s" % self.socket.getpeername()[:2]
        except socket.error:
            addr = None
        status = [self.__class__.__module__ + "." + self.__class__.__name__]
        status.append("(addr=%s, user=%r, receive=%r, file=%r)"
                      % (addr, self.cmd_channel.username or '',
                         self.receive, getattr(self.file_obj, 'name', '')))
        return '<%s>' % (' '.join(status))

    __str__ = __repr__

    def _use_sendfile(self, producer):
        # Return True when this transfer may use sendfile(2): it must
        # be enabled server-side, be a real file (not a directory
        # listing) exposing fileno(), and a binary ('i') transfer.
        if not self.cmd_channel.use_sendfile:
            debug("starting transfer not using sendfile(2) as per server "
                  "config", self)
            return False
        if not isinstance(producer, FileProducer):
            debug("starting transfer not using sendfile(2) (directory "
                  "listing)", self)
            return False
        else:
            if not hasattr(self.file_obj, "fileno"):
                debug("starting transfer not using sendfile(2) %r has no "
                      "fileno() method" % self.file_obj, self)
                return False
            if not producer.type == 'i':
                debug("starting transfer not using sendfile(2) (text file "
                      "transfer)", self)
                return False
            debug("starting transfer using sendfile()", self)
            return True

    def push(self, data):
        """Push raw data onto the channel for sending."""
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        AsyncChat.push(self, data)

    def push_with_producer(self, producer):
        """Push a producer onto the channel, switching to sendfile(2)
        when possible (see _use_sendfile()).
        """
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.WRITE)
        self._wanted_io_events = self.ioloop.WRITE
        if self._use_sendfile(producer):
            self._offset = producer.file.tell()
            self._filefd = self.file_obj.fileno()
            try:
                self.initiate_sendfile()
            except _GiveUpOnSendfile:
                pass
            else:
                # from now on send directly from the file descriptor
                self.initiate_send = self.initiate_sendfile
                return
        debug("starting transfer using send()", self)
        AsyncChat.push_with_producer(self, producer)

    def close_when_done(self):
        asynchat.async_chat.close_when_done(self)

    def initiate_send(self):
        asynchat.async_chat.initiate_send(self)

    def initiate_sendfile(self):
        """A wrapper around sendfile."""
        try:
            sent = sendfile(self._fileno, self._filefd, self._offset,
                            self.ac_out_buffer_size)
        except OSError as err:
            if err.errno in _ERRNOS_RETRY or err.errno == errno.EBUSY:
                # retriable condition; try again on the next WRITE event
                return
            elif err.errno in _ERRNOS_DISCONNECTED:
                self.handle_close()
            else:
                if self.tot_bytes_sent == 0:
                    # nothing sent yet: safe to fall back on plain send()
                    logger.warning(
                        "sendfile() failed; falling back on using plain send")
                    raise _GiveUpOnSendfile
                else:
                    raise
        else:
            if sent == 0:
                # this signals the channel that the transfer is completed
                self.discard_buffers()
                self.handle_close()
            else:
                self._offset += sent
                self.tot_bytes_sent += sent

    # --- utility methods

    def _posix_ascii_data_wrapper(self, chunk):
        """The data wrapper used for receiving data in ASCII mode on
        systems using a single line terminator, handling those cases
        where CRLF ('\r\n') gets delivered in two chunks.
        """
        if self._had_cr:
            chunk = b'\r' + chunk

        if chunk.endswith(b'\r'):
            self._had_cr = True
            chunk = chunk[:-1]
        else:
            self._had_cr = False

        return chunk.replace(b'\r\n', b(os.linesep))

    def enable_receiving(self, type, cmd):
        """Enable receiving of data over the channel. Depending on the
        TYPE currently in use it creates an appropriate wrapper for the
        incoming data.

        - (str) type: current transfer type, 'a' (ASCII) or 'i' (binary).
        - (str) cmd: the FTP command which started the transfer.
        """
        self._initialized = True
        self.modify_ioloop_events(self.ioloop.READ)
        self._wanted_io_events = self.ioloop.READ
        self.cmd = cmd
        if type == 'a':
            if os.linesep == '\r\n':
                self._data_wrapper = None
            else:
                self._data_wrapper = self._posix_ascii_data_wrapper
        elif type == 'i':
            self._data_wrapper = None
        else:
            raise TypeError("unsupported type")
        self.receive = True

    def get_transmitted_bytes(self):
        """Return the number of transmitted bytes."""
        return self.tot_bytes_sent + self.tot_bytes_received

    def get_elapsed_time(self):
        """Return the transfer elapsed time in seconds."""
        return timer() - self._start_time

    def transfer_in_progress(self):
        """Return True if a transfer is in progress, else False."""
        return self.get_transmitted_bytes() != 0

    # --- connection

    def send(self, data):
        # overridden to keep the sent-bytes counter up to date
        result = AsyncChat.send(self, data)
        self.tot_bytes_sent += result
        return result

    def refill_buffer(self):
        """Overridden as a fix around http://bugs.python.org/issue1740572
        (when the producer is consumed, close() was called instead of
        handle_close()).
        """
        while 1:
            if len(self.producer_fifo):
                p = self.producer_fifo.first()
                # a 'None' in the producer fifo is a sentinel,
                # telling us to close the channel.
                if p is None:
                    if not self.ac_out_buffer:
                        self.producer_fifo.pop()
                        # self.close()
                        self.handle_close()
                    return
                elif isinstance(p, str):
                    self.producer_fifo.pop()
                    self.ac_out_buffer += p
                    return
                data = p.more()
                if data:
                    self.ac_out_buffer = self.ac_out_buffer + data
                    return
                else:
                    self.producer_fifo.pop()
            else:
                return

    def handle_read(self):
        """Called when there is data waiting to be read."""
        try:
            chunk = self.recv(self.ac_in_buffer_size)
        except RetryError:
            pass
        except socket.error:
            self.handle_error()
        else:
            self.tot_bytes_received += len(chunk)
            if not chunk:
                self.transfer_finished = True
                # self.close()  # <-- asyncore.recv() already do that...
                return
            if self._data_wrapper is not None:
                chunk = self._data_wrapper(chunk)
            try:
                self.file_obj.write(chunk)
            except OSError as err:
                # tag file-related errors so handle_error() can reply 426
                raise _FileReadWriteError(err)

    handle_read_event = handle_read  # small speedup

    def readable(self):
        """Predicate for inclusion in the readable for select()."""
        # It the channel is not supposed to be receiving but yet it's
        # in the list of readable events, that means it has been
        # disconnected, in which case we explicitly close() it.
        # This is necessary as differently from FTPHandler this channel
        # is not supposed to be readable/writable at first, meaning the
        # upper IOLoop might end up calling readable() repeatedly,
        # hogging CPU resources.
        if not self.receive and not self._initialized:
            return self.close()
        return self.receive

    def writable(self):
        """Predicate for inclusion in the writable for select()."""
        return not self.receive and asynchat.async_chat.writable(self)

    def handle_timeout(self):
        """Called cyclically to check if data transfer is stalling with
        no progress in which case the client is kicked off.
        """
        if self.get_transmitted_bytes() > self._lastdata:
            self._lastdata = self.get_transmitted_bytes()
        else:
            msg = "Data connection timed out."
            self._resp = ("421 " + msg, logger.info)
            self.close()
            self.cmd_channel.close_when_done()

    def handle_error(self):
        """Called when an exception is raised and not otherwise handled."""
        try:
            raise
        # an error could occur in case we fail reading / writing
        # from / to file (e.g. file system gets full)
        except _FileReadWriteError as err:
            error = _strerror(err.errno)
        except Exception:
            # some other exception occurred; we don't want to provide
            # confidential error messages
            self.log_exception(self)
            error = "Internal error"
        try:
            self._resp = ("426 %s; transfer aborted." % error, logger.warning)
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def handle_close(self):
        """Called when the socket is closed."""
        # If we used channel for receiving we assume that transfer is
        # finished when client closes the connection, if we used channel
        # for sending we have to check that all data has been sent
        # (responding with 226) or not (responding with 426).
        # In both cases handle_close() is automatically called by the
        # underlying asynchat module.
        if not self._closed:
            if self.receive:
                self.transfer_finished = True
            else:
                self.transfer_finished = len(self.producer_fifo) == 0
            try:
                if self.transfer_finished:
                    self._resp = ("226 Transfer complete.", logger.debug)
                else:
                    tot_bytes = self.get_transmitted_bytes()
                    self._resp = ("426 Transfer aborted; %d bytes transmitted."
                                  % tot_bytes, logger.debug)
            finally:
                self.close()

    def close(self):
        """Close the data channel, first attempting to close any remaining
        file handles."""
        debug("call: close()", inst=self)
        if not self._closed:
            # RFC-959 says we must close the connection before replying
            AsyncChat.close(self)
            if self._resp:
                self.cmd_channel.respond(self._resp[0], logfun=self._resp[1])
            if self.file_obj is not None and not self.file_obj.closed:
                self.file_obj.close()
            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()
            if self.file_obj is not None:
                filename = self.file_obj.name
                elapsed_time = round(self.get_elapsed_time(), 3)
                self.cmd_channel.log_transfer(
                    cmd=self.cmd,
                    filename=self.file_obj.name,
                    receive=self.receive,
                    completed=self.transfer_finished,
                    elapsed=elapsed_time,
                    bytes=self.get_transmitted_bytes())
                # fire the appropriate user callback
                if self.transfer_finished:
                    if self.receive:
                        self.cmd_channel.on_file_received(filename)
                    else:
                        self.cmd_channel.on_file_sent(filename)
                else:
                    if self.receive:
                        self.cmd_channel.on_incomplete_file_received(filename)
                    else:
                        self.cmd_channel.on_incomplete_file_sent(filename)
            self.cmd_channel._on_dtp_close()
# dirty hack in order to turn AsyncChat into a new style class in
# python 2.x so that we can use super(); on Python 3 every class is
# already new-style, so a plain subclass suffices.
if PY3:
    class _AsyncChatNewStyle(AsyncChat):
        pass
else:
    class _AsyncChatNewStyle(object, AsyncChat):

        def __init__(self, *args, **kwargs):
            super(object, self).__init__(*args, **kwargs)  # bypass object
class ThrottledDTPHandler(_AsyncChatNewStyle, DTPHandler):
    """A DTPHandler subclass which wraps sending and receiving in a data
    counter and temporarily "sleeps" the channel so that you burst to no
    more than x Kb/sec average.

     - (int) read_limit: the maximum number of bytes to read (receive)
       in one second (defaults to 0 == no limit).

     - (int) write_limit: the maximum number of bytes to write (send)
       in one second (defaults to 0 == no limit).

     - (bool) auto_sized_buffers: this option only applies when read
       and/or write limits are specified. When enabled it bumps down
       the data buffer sizes so that they are never greater than read
       and write limits which results in a less bursty and smoother
       throughput (default: True).
    """
    read_limit = 0
    write_limit = 0
    auto_sized_buffers = True

    def __init__(self, sock, cmd_channel):
        super(ThrottledDTPHandler, self).__init__(sock, cmd_channel)
        # time (per timer()) before which we should not transmit again
        self._timenext = 0
        # bytes transmitted since the last throttling checkpoint
        self._datacount = 0
        self.sleeping = False
        self._throttler = None
        if self.auto_sized_buffers:
            # halve the buffers until they fit within the rate limits
            if self.read_limit:
                while self.ac_in_buffer_size > self.read_limit:
                    self.ac_in_buffer_size /= 2
            if self.write_limit:
                while self.ac_out_buffer_size > self.write_limit:
                    self.ac_out_buffer_size /= 2
        # true division above may yield floats; buffer sizes must be ints
        self.ac_in_buffer_size = int(self.ac_in_buffer_size)
        self.ac_out_buffer_size = int(self.ac_out_buffer_size)

    def _use_sendfile(self, producer):
        # sendfile(2) bypasses send()/recv() and therefore cannot be
        # rate-limited; always disable it for throttled transfers.
        return False

    def recv(self, buffer_size):
        chunk = super(ThrottledDTPHandler, self).recv(buffer_size)
        if self.read_limit:
            self._throttle_bandwidth(len(chunk), self.read_limit)
        return chunk

    def send(self, data):
        num_sent = super(ThrottledDTPHandler, self).send(data)
        if self.write_limit:
            self._throttle_bandwidth(num_sent, self.write_limit)
        return num_sent

    def _cancel_throttler(self):
        # cancel a pending wake-up callback, if any
        if self._throttler is not None and not self._throttler.cancelled:
            self._throttler.cancel()

    def _throttle_bandwidth(self, len_chunk, max_speed):
        """A method which counts data transmitted so that you burst to
        no more than x Kb/sec average.
        """
        self._datacount += len_chunk
        if self._datacount >= max_speed:
            self._datacount = 0
            now = timer()
            sleepfor = (self._timenext - now) * 2
            if sleepfor > 0:
                # we've passed bandwidth limits
                def unsleep():
                    # re-register the channel for the event we were
                    # handling before going to sleep
                    if self.receive:
                        event = self.ioloop.READ
                    else:
                        event = self.ioloop.WRITE
                    self.add_channel(events=event)

                self.del_channel()
                self._cancel_throttler()
                self._throttler = self.ioloop.call_later(
                    sleepfor, unsleep, _errback=self.handle_error)
            self._timenext = now + 1

    def close(self):
        self._cancel_throttler()
        super(ThrottledDTPHandler, self).close()
# --- producers


class FileProducer(object):
    """Producer wrapper for file[-like] objects."""

    buffer_size = 65536

    def __init__(self, file, type):
        """Initialize the producer with a data_wrapper appropriate to TYPE.

        - (file) file: the file[-like] object.
        - (str) type: the current TYPE, 'a' (ASCII) or 'i' (binary).
        """
        self.file = file
        self.type = type
        if type == 'a' and os.linesep != '\r\n':
            # ASCII mode on non-CRLF platforms: translate the native
            # line separator to CRLF on the way out
            self._data_wrapper = lambda chunk: chunk.replace(
                b(os.linesep), b'\r\n')
        else:
            self._data_wrapper = None

    def more(self):
        """Attempt a chunk of data of size self.buffer_size."""
        try:
            chunk = self.file.read(self.buffer_size)
        except OSError as err:
            raise _FileReadWriteError(err)
        if self._data_wrapper is not None:
            chunk = self._data_wrapper(chunk)
        return chunk
class BufferedIteratorProducer(object):
    """Producer for iterator objects with buffer capabilities."""
    # how many times the iterator will be advanced before
    # returning some data
    loops = 20

    def __init__(self, iterator):
        self.iterator = iterator

    def more(self):
        """Attempt a chunk of data from iterator by calling
        its next() method different times.
        """
        chunks = []
        while len(chunks) < self.loops:
            try:
                chunks.append(next(self.iterator))
            except StopIteration:
                break
        return b''.join(chunks)
# --- FTP
class FTPHandler(AsyncChat):
    """Implements the FTP server Protocol Interpreter (see RFC-959),
    handling commands received from the client on the control channel.

    All relevant session information is stored in class attributes
    reproduced below and can be modified before instantiating this
    class.

     - (int) timeout:
       The timeout which is the maximum time a remote client may spend
       between FTP commands. If the timeout triggers, the remote client
       will be kicked off. Defaults to 300 seconds.

     - (str) banner: the string sent when client connects.

     - (int) max_login_attempts:
       the maximum number of wrong authentications before disconnecting
       the client (default 3).

     - (bool) permit_foreign_addresses:
       FTP site-to-site transfer feature: also referenced as "FXP" it
       permits for transferring a file between two remote FTP servers
       without the transfer going through the client's host (not
       recommended for security reasons as described in RFC-2577).
       Having this attribute set to False means that all data
       connections from/to remote IP addresses which do not match the
       client's IP address will be dropped (default False).

     - (bool) permit_privileged_ports:
       set to True if you want to permit active data connections (PORT)
       over privileged ports (not recommended, defaulting to False).

     - (str) masquerade_address:
       the "masqueraded" IP address to provide along PASV reply when
       pyftpdlib is running behind a NAT or other types of gateways.
       When configured pyftpdlib will hide its local address and
       instead use the public address of your NAT (default None).

     - (dict) masquerade_address_map:
       in case the server has multiple IP addresses which are all
       behind a NAT router, you may wish to specify individual
       masquerade_addresses for each of them. The map expects a
       dictionary containing private IP addresses as keys, and their
       corresponding public (masquerade) addresses as values.

     - (list) passive_ports:
       what ports the ftpd will use for its passive data transfers.
       Value expected is a list of integers (e.g. range(60000, 65535)).
       When configured pyftpdlib will no longer use kernel-assigned
       random ports (default None).

     - (bool) use_gmt_times:
       when True causes the server to report all ls and MDTM times in
       GMT and not local time (default True).

     - (bool) use_sendfile: when True uses sendfile() system call to
       send a file resulting in faster uploads (from server to client).
       Works on UNIX only and requires pysendfile module to be
       installed separately:
       https://github.com/giampaolo/pysendfile/
       Automatically defaults to True if pysendfile module is
       installed.

     - (bool) tcp_no_delay: controls the use of the TCP_NODELAY socket
       option which disables the Nagle algorithm resulting in
       significantly better performances (default True on all systems
       where it is supported).

     - (str) unicode_errors:
       the error handler passed to ''.encode() and ''.decode():
       http://docs.python.org/library/stdtypes.html#str.decode
       (defaults to 'replace').

     - (str) log_prefix:
       the prefix string preceding any log line; all instance
       attributes can be used as arguments.

    All relevant instance attributes initialized when client connects
    are reproduced below. You may be interested in them in case you
    want to subclass the original FTPHandler.

     - (bool) authenticated: True if client authenticated himself.
     - (str) username: the name of the connected user (if any).
     - (int) attempted_logins: number of currently attempted logins.
     - (str) current_type: the current transfer type (default "a")
     - (int) af: the connection's address family (IPv4/IPv6)
     - (instance) server: the FTPServer class instance.
     - (instance) data_channel: the data channel instance (if any).
    """
    # these are overridable defaults

    # default classes
    authorizer = DummyAuthorizer()
    active_dtp = ActiveDTP
    passive_dtp = PassiveDTP
    dtp_handler = DTPHandler
    abstracted_fs = AbstractedFS
    proto_cmds = proto_cmds

    # session attributes (explained in the docstring)
    timeout = 300
    banner = "pyftpdlib %s ready." % __ver__
    max_login_attempts = 3
    permit_foreign_addresses = False
    permit_privileged_ports = False
    masquerade_address = None
    masquerade_address_map = {}
    passive_ports = None
    use_gmt_times = True
    # enabled automatically when a sendfile implementation is available
    use_sendfile = sendfile is not None
    tcp_no_delay = hasattr(socket, "TCP_NODELAY")
    unicode_errors = 'replace'
    log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'
    # seconds the reply to a failed login is artificially delayed by
    auth_failed_timeout = 3
def __init__(self, conn, server, ioloop=None):
"""Initialize the command channel.
- (instance) conn: the socket object instance of the newly
established connection.
- (instance) server: the ftp server class instance.
"""
# public session attributes
self.server = server
self.fs = None
self.authenticated = False
self.username = ""
self.password = ""
self.attempted_logins = 0
self.data_channel = None
self.remote_ip = ""
self.remote_port = ""
self.started = time.time()
# private session attributes
self._last_response = ""
self._current_type = 'a'
self._restart_position = 0
self._quit_pending = False
self._in_buffer = []
self._in_buffer_len = 0
self._epsvall = False
self._dtp_acceptor = None
self._dtp_connector = None
self._in_dtp_queue = None
self._out_dtp_queue = None
self._extra_feats = []
self._current_facts = ['type', 'perm', 'size', 'modify']
self._rnfr = None
self._idler = None
self._log_debug = logging.getLogger('pyftpdlib').getEffectiveLevel() \
<= logging.DEBUG
if os.name == 'posix':
self._current_facts.append('unique')
self._available_facts = self._current_facts[:]
if pwd and grp:
self._available_facts += ['unix.mode', 'unix.uid', 'unix.gid']
if os.name == 'nt':
self._available_facts.append('create')
try:
AsyncChat.__init__(self, conn, ioloop=ioloop)
except socket.error as err:
# if we get an exception here we want the dispatcher
# instance to set socket attribute before closing, see:
# https://github.com/giampaolo/pyftpdlib/issues/188
AsyncChat.__init__(self, socket.socket(), ioloop=ioloop)
self.close()
debug("call: FTPHandler.__init__, err %r" % err, self)
if err.errno == errno.EINVAL:
# https://github.com/giampaolo/pyftpdlib/issues/143
return
self.handle_error()
return
self.set_terminator(b"\r\n")
# connection properties
try:
self.remote_ip, self.remote_port = self.socket.getpeername()[:2]
except socket.error as err:
debug("call: FTPHandler.__init__, err on getpeername() %r" % err,
self)
# A race condition may occur if the other end is closing
# before we can get the peername, hence ENOTCONN (see issue
# #100) while EINVAL can occur on OSX (see issue #143).
self.connected = False
if err.errno in (errno.ENOTCONN, errno.EINVAL):
self.close()
else:
self.handle_error()
return
else:
self.log("FTP session opened (connect)")
# try to handle urgent data inline
try:
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_OOBINLINE, 1)
except socket.error as err:
debug("call: FTPHandler.__init__, err on SO_OOBINLINE %r" % err,
self)
# disable Nagle algorithm for the control socket only, resulting
# in significantly better performances
if self.tcp_no_delay:
try:
self.socket.setsockopt(socket.SOL_TCP, socket.TCP_NODELAY, 1)
except socket.error:
debug(
"call: FTPHandler.__init__, err on TCP_NODELAY %r" % err,
self)
# remove this instance from IOLoop's socket_map
if not self.connected:
self.close()
return
if self.timeout:
self._idler = self.ioloop.call_later(
self.timeout, self.handle_timeout, _errback=self.handle_error)
def __repr__(self):
status = [self.__class__.__module__ + "." + self.__class__.__name__]
status.append("(addr=%s:%s, user=%r)" % (self.remote_ip,
self.remote_port, self.username or ''))
return '<%s>' % (' '.join(status))
__str__ = __repr__
def handle(self):
"""Return a 220 'ready' response to the client over the command
channel.
"""
self.on_connect()
if not self._closed and not self._closing:
if len(self.banner) <= 75:
self.respond("220 %s" % str(self.banner))
else:
self.push('220-%s\r\n' % str(self.banner))
self.respond('220 ')
def handle_max_cons(self):
"""Called when limit for maximum number of connections is reached."""
msg = "421 Too many connections. Service temporarily unavailable."
self.respond_w_warning(msg)
# If self.push is used, data could not be sent immediately in
# which case a new "loop" will occur exposing us to the risk of
# accepting new connections. Since this could cause asyncore to
# run out of fds in case we're using select() on Windows we
# immediately close the channel by using close() instead of
# close_when_done(). If data has not been sent yet client will
# be silently disconnected.
self.close()
def handle_max_cons_per_ip(self):
"""Called when too many clients are connected from the same IP."""
msg = "421 Too many connections from the same IP address."
self.respond_w_warning(msg)
self.close_when_done()
def handle_timeout(self):
"""Called when client does not send any command within the time
specified in <timeout> attribute."""
msg = "Control connection timed out."
self.respond("421 " + msg, logfun=logger.info)
self.close_when_done()
# --- asyncore / asynchat overridden methods
def readable(self):
# Checking for self.connected seems to be necessary as per:
# https://github.com/giampaolo/pyftpdlib/issues/188#c18
# In contrast to DTPHandler, here we are not interested in
# attempting to receive any further data from a closed socket.
return self.connected and AsyncChat.readable(self)
def writable(self):
return self.connected and AsyncChat.writable(self)
def collect_incoming_data(self, data):
"""Read incoming data and append to the input buffer."""
self._in_buffer.append(data)
self._in_buffer_len += len(data)
# Flush buffer if it gets too long (possible DoS attacks).
# RFC-959 specifies that a 500 response could be given in
# such cases
buflimit = 2048
if self._in_buffer_len > buflimit:
self.respond_w_warning('500 Command too long.')
self._in_buffer = []
self._in_buffer_len = 0
    def decode(self, bytes):
        """Decode a raw command line (bytes) to text using UTF-8 and the
        configured ``unicode_errors`` handler ('replace' by default)."""
        return bytes.decode('utf8', self.unicode_errors)
def found_terminator(self):
r"""Called when the incoming data stream matches the \r\n
terminator.
"""
if self._idler is not None and not self._idler.cancelled:
self._idler.reset()
line = b''.join(self._in_buffer)
try:
line = self.decode(line)
except UnicodeDecodeError:
# By default we'll never get here as we replace errors
# but user might want to override this behavior.
# RFC-2640 doesn't mention what to do in this case so
# we'll just return 501 (bad arg).
return self.respond("501 Can't decode command.")
self._in_buffer = []
self._in_buffer_len = 0
cmd = line.split(' ')[0].upper()
arg = line[len(cmd) + 1:]
try:
self.pre_process_command(line, cmd, arg)
except UnicodeEncodeError:
self.respond("501 can't decode path (server filesystem encoding "
"is %s)" % sys.getfilesystemencoding())
    def pre_process_command(self, line, cmd, arg):
        """Validate a parsed command line before dispatching it.

        Performs syntax checks (mandatory / forbidden argument),
        authentication checks, filesystem path translation and
        permission checks; finally hands off to process_command().

        - (str) line: the complete command line as received.
        - (str) cmd: the command verb, upper-cased.
        - (str) arg: the remainder of the line (may be empty).
        """
        kwargs = {}
        if cmd == "SITE" and arg:
            # "SITE <subcmd>" pairs are handled as commands on their own
            cmd = "SITE %s" % arg.split(' ')[0].upper()
            arg = line[len(cmd) + 1:]

        if cmd != 'PASS':
            self.logline("<- %s" % line)
        else:
            # never log passwords in clear text
            self.logline("<- %s %s" % (line.split(' ')[0], '*' * 6))

        # Recognize those commands having a "special semantic". They
        # should be sent by following the RFC-959 procedure of sending
        # Telnet IP/Synch sequence (chr 242 and 255) as OOB data but
        # since many ftp clients don't do it correctly we check the
        # last 4 characters only.
        if cmd not in self.proto_cmds:
            if cmd[-4:] in ('ABOR', 'STAT', 'QUIT'):
                cmd = cmd[-4:]
            else:
                msg = 'Command "%s" not understood.' % cmd
                self.respond('500 ' + msg)
                if cmd:
                    self.log_cmd(cmd, arg, 500, msg)
                return

        if not arg and self.proto_cmds[cmd]['arg'] == True:  # NOQA
            msg = "Syntax error: command needs an argument."
            self.respond("501 " + msg)
            self.log_cmd(cmd, "", 501, msg)
            return
        if arg and self.proto_cmds[cmd]['arg'] == False:  # NOQA
            msg = "Syntax error: command does not accept arguments."
            self.respond("501 " + msg)
            self.log_cmd(cmd, arg, 501, msg)
            return

        if not self.authenticated:
            if self.proto_cmds[cmd]['auth'] or (cmd == 'STAT' and arg):
                msg = "Log in with USER and PASS first."
                self.respond("530 " + msg)
                self.log_cmd(cmd, arg, 530, msg)
            else:
                # call the proper ftp_* method
                self.process_command(cmd, arg)
                return
        else:
            if (cmd == 'STAT') and not arg:
                self.ftp_STAT(u(''))
                return

            # for file-system related commands check whether real path
            # destination is valid
            if self.proto_cmds[cmd]['perm'] and (cmd != 'STOU'):
                if cmd in ('CWD', 'XCWD'):
                    arg = self.fs.ftp2fs(arg or u('/'))
                elif cmd in ('CDUP', 'XCUP'):
                    arg = self.fs.ftp2fs(u('..'))
                elif cmd == 'LIST':
                    # ls-style switches are ignored; list the cwd instead
                    if arg.lower() in ('-a', '-l', '-al', '-la'):
                        arg = self.fs.ftp2fs(self.fs.cwd)
                    else:
                        arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'STAT':
                    if glob.has_magic(arg):
                        msg = 'Globbing not supported.'
                        self.respond('550 ' + msg)
                        self.log_cmd(cmd, arg, 550, msg)
                        return
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)
                elif cmd == 'SITE CHMOD':
                    if ' ' not in arg:
                        msg = "Syntax error: command needs two arguments."
                        self.respond("501 " + msg)
                        self.log_cmd(cmd, "", 501, msg)
                        return
                    else:
                        mode, arg = arg.split(' ', 1)
                        arg = self.fs.ftp2fs(arg)
                        kwargs = dict(mode=mode)
                else:  # LIST, NLST, MLSD, MLST
                    arg = self.fs.ftp2fs(arg or self.fs.cwd)

                # reject paths escaping the user's chroot
                if not self.fs.validpath(arg):
                    line = self.fs.fs2ftp(arg)
                    msg = '"%s" points to a path which is outside ' \
                          "the user's root directory" % line
                    self.respond("550 %s." % msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return

            # check permission
            perm = self.proto_cmds[cmd]['perm']
            if perm is not None and cmd != 'STOU':
                if not self.authorizer.has_perm(self.username, perm, arg):
                    msg = "Not enough privileges."
                    self.respond("550 " + msg)
                    self.log_cmd(cmd, arg, 550, msg)
                    return

            # call the proper ftp_* method
            self.process_command(cmd, arg, **kwargs)
def process_command(self, cmd, *args, **kwargs):
"""Process command by calling the corresponding ftp_* class
method (e.g. for received command "MKD pathname", ftp_MKD()
method is called with "pathname" as the argument).
"""
if self._closed:
return
self._last_response = ""
method = getattr(self, 'ftp_' + cmd.replace(' ', '_'))
method(*args, **kwargs)
if self._last_response:
code = int(self._last_response[:3])
resp = self._last_response[4:]
self.log_cmd(cmd, args[0], code, resp)
    def handle_error(self):
        """Log the uncaught exception and close the session; a failure
        while logging/closing must never propagate out of the error
        handler, so it is reported as critical instead."""
        try:
            self.log_exception(self)
            self.close()
        except Exception:
            logger.critical(traceback.format_exc())

    def handle_close(self):
        # peer closed the control connection
        self.close()
    def close(self):
        """Close the current channel disconnecting the client."""
        debug("call: close()", inst=self)
        if not self._closed:
            AsyncChat.close(self)

            # abort any half-established data connection
            self._shutdown_connecting_dtp()

            if self.data_channel is not None:
                self.data_channel.close()
                del self.data_channel

            # release file objects still queued for transfers that
            # never started
            if self._out_dtp_queue is not None:
                file = self._out_dtp_queue[2]
                if file is not None:
                    file.close()
            if self._in_dtp_queue is not None:
                file = self._in_dtp_queue[0]
                if file is not None:
                    file.close()

            del self._out_dtp_queue
            del self._in_dtp_queue

            if self._idler is not None and not self._idler.cancelled:
                self._idler.cancel()

            # remove client IP address from ip map
            if self.remote_ip in self.server.ip_map:
                self.server.ip_map.remove(self.remote_ip)

            # break the reference cycle with the filesystem object
            if self.fs is not None:
                self.fs.cmd_channel = None
                self.fs = None
            self.log("FTP session closed (disconnect).")
            # Having self.remote_ip not set means that no connection
            # actually took place, hence we're not interested in
            # invoking the callback.
            if self.remote_ip:
                self.ioloop.call_later(0, self.on_disconnect,
                                       _errback=self.handle_error)
def _shutdown_connecting_dtp(self):
"""Close any ActiveDTP or PassiveDTP instance waiting to
establish a connection (passive or active).
"""
if self._dtp_acceptor is not None:
self._dtp_acceptor.close()
self._dtp_acceptor = None
if self._dtp_connector is not None:
self._dtp_connector.close()
self._dtp_connector = None
    # --- public callbacks
    # Note: to run a time consuming task make sure to use a separate
    # process or thread (see FAQs).

    def on_connect(self):
        """Called when client connects, *before* sending the initial
        220 reply.
        """

    def on_disconnect(self):
        """Called when connection is closed."""

    def on_login(self, username):
        """Called on user login."""

    def on_login_failed(self, username, password):
        """Called on failed login attempt.
        At this point client might have already been disconnected if it
        failed too many times.
        """

    def on_logout(self, username):
        """Called when user "cleanly" logs out due to QUIT or USER
        issued twice (re-login). This is not called if the connection
        is simply closed by client.
        """

    def on_file_sent(self, file):
        """Called every time a file has been successfully sent.
        "file" is the absolute name of the file just being sent.
        """

    def on_file_received(self, file):
        """Called every time a file has been successfully received.
        "file" is the absolute name of the file just being received.
        """

    def on_incomplete_file_sent(self, file):
        """Called every time a file has not been entirely sent.
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """

    def on_incomplete_file_received(self, file):
        """Called every time a file has not been entirely received
        (e.g. ABOR during transfer or client disconnected).
        "file" is the absolute name of that file.
        """
# --- internal callbacks
    def _on_dtp_connection(self):
        """Called every time data channel connects, either active or
        passive.

        Incoming and outgoing queues are checked for pending data.
        If outbound data is pending, it is pushed into the data channel.
        If awaiting inbound data, the data channel is enabled for
        receiving.
        """
        # Close accepting DTP only. By closing ActiveDTP DTPHandler
        # would receive a closed socket object.
        # self._shutdown_connecting_dtp()
        if self._dtp_acceptor is not None:
            self._dtp_acceptor.close()
            self._dtp_acceptor = None

        # stop the idle timer as long as the data transfer is not finished
        if self._idler is not None and not self._idler.cancelled:
            self._idler.cancel()

        # check for data to send
        if self._out_dtp_queue is not None:
            data, isproducer, file, cmd = self._out_dtp_queue
            self._out_dtp_queue = None
            self.data_channel.cmd = cmd
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # push() may have closed the channel already
                if self.data_channel is not None:
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        # check for data to receive
        elif self._in_dtp_queue is not None:
            file, cmd = self._in_dtp_queue
            self.data_channel.file_obj = file
            self._in_dtp_queue = None
            self.data_channel.enable_receiving(self._current_type, cmd)
def _on_dtp_close(self):
"""Called every time the data channel is closed."""
self.data_channel = None
if self._quit_pending:
self.close()
elif self.timeout:
# data transfer finished, restart the idle timer
if self._idler is not None and not self._idler.cancelled:
self._idler.cancel()
self._idler = self.ioloop.call_later(
self.timeout, self.handle_timeout, _errback=self.handle_error)
# --- utility
    def push(self, s):
        # encode the reply to raw bytes before handing it to asynchat
        asynchat.async_chat.push(self, s.encode('utf8'))

    def respond(self, resp, logfun=logger.debug):
        """Send a response to the client using the command channel."""
        self._last_response = resp
        self.push(resp + '\r\n')
        if self._log_debug:
            self.logline('-> %s' % resp, logfun=logfun)
        else:
            # strip the leading "NNN " status code from the logged text
            self.log(resp[4:], logfun=logfun)

    def respond_w_warning(self, resp):
        # same as respond() but logged with WARNING severity
        self.respond(resp, logfun=logger.warning)
    def push_dtp_data(self, data, isproducer=False, file=None, cmd=None):
        """Pushes data into the data channel.

        It is usually called for those commands requiring some data to
        be sent over the data channel (e.g. RETR).
        If data channel does not exist yet, it queues the data to send
        later; data will then be pushed into data channel when
        _on_dtp_connection() will be called.

         - (str/classobj) data: the data to send which may be a string
            or a producer object).
         - (bool) isproducer: whether treat data as a producer.
         - (file) file: the file[-like] object to send (if any).
         - (str) cmd: the command this transfer belongs to (if any).
        """
        if self.data_channel is not None:
            self.respond(
                "125 Data connection already open. Transfer starting.")
            if file:
                self.data_channel.file_obj = file
            try:
                if not isproducer:
                    self.data_channel.push(data)
                else:
                    self.data_channel.push_with_producer(data)
                # push() may have closed the channel already
                if self.data_channel is not None:
                    self.data_channel.cmd = cmd
                    self.data_channel.close_when_done()
            except Exception:
                # dealing with this exception is up to DTP (see bug #84)
                self.data_channel.handle_error()
        else:
            # queued data is picked up by _on_dtp_connection()
            self.respond(
                "150 File status okay. About to open data connection.")
            self._out_dtp_queue = (data, isproducer, file, cmd)
    def flush_account(self):
        """Flush account information by clearing attributes that need
        to be reset on a REIN or new USER command.
        """
        self._shutdown_connecting_dtp()
        # if there's a transfer in progress RFC-959 states we are
        # supposed to let it finish
        if self.data_channel is not None:
            if not self.data_channel.transfer_in_progress():
                self.data_channel.close()
                self.data_channel = None

        # notify the logout callback before wiping the credentials
        username = self.username
        if self.authenticated and username:
            self.on_logout(username)
        self.authenticated = False
        self.username = ""
        self.password = ""
        self.attempted_logins = 0
        self._current_type = 'a'
        self._restart_position = 0
        self._quit_pending = False
        self._in_dtp_queue = None
        self._rnfr = None
        self._out_dtp_queue = None
    def run_as_current_user(self, function, *args, **kwargs):
        """Execute a function impersonating the current logged-in user."""
        self.authorizer.impersonate_user(self.username, self.password)
        try:
            return function(*args, **kwargs)
        finally:
            # always restore privileges, even if the call raised
            self.authorizer.terminate_impersonation(self.username)

    # --- logging wrappers

    # this is defined earlier
    # log_prefix = '%(remote_ip)s:%(remote_port)s-[%(username)s]'

    def log(self, msg, logfun=logger.info):
        """Log a message, including additional identifying session data."""
        prefix = self.log_prefix % self.__dict__
        logfun("%s %s" % (prefix, msg))

    def logline(self, msg, logfun=logger.debug):
        """Log a line including additional identifying session data.
        By default this is disabled unless logging level == DEBUG.
        """
        if self._log_debug:
            prefix = self.log_prefix % self.__dict__
            logfun("%s %s" % (prefix, msg))

    def logerror(self, msg):
        """Log an error including additional identifying session data."""
        prefix = self.log_prefix % self.__dict__
        logger.error("%s %s" % (prefix, msg))

    def log_exception(self, instance):
        """Log an unhandled exception. 'instance' is the instance
        where the exception was generated.
        """
        logger.exception("unhandled exception in instance %r", instance)

    # the list of commands which gets logged when logging level
    # is >= logging.INFO
    log_cmds_list = ["DELE", "RNFR", "RNTO", "MKD", "RMD", "CWD",
                     "XMKD", "XRMD", "XCWD",
                     "REIN", "SITE CHMOD"]
def log_cmd(self, cmd, arg, respcode, respstr):
"""Log commands and responses in a standardized format.
This is disabled in case the logging level is set to DEBUG.
- (str) cmd:
the command sent by client
- (str) arg:
the command argument sent by client.
For filesystem commands such as DELE, MKD, etc. this is
already represented as an absolute real filesystem path
like "/home/user/file.ext".
- (int) respcode:
the response code as being sent by server. Response codes
starting with 4xx or 5xx are returned if the command has
been rejected for some reason.
- (str) respstr:
the response string as being sent by server.
By default only DELE, RMD, RNTO, MKD, CWD, ABOR, REIN, SITE CHMOD
commands are logged and the output is redirected to self.log
method.
Can be overridden to provide alternate formats or to log
further commands.
"""
if not self._log_debug and cmd in self.log_cmds_list:
line = '%s %s' % (' '.join([cmd, arg]).strip(), respcode)
if str(respcode)[0] in ('4', '5'):
line += ' %r' % respstr
self.log(line)
def log_transfer(self, cmd, filename, receive, completed, elapsed, bytes):
"""Log all file transfers in a standardized format.
- (str) cmd:
the original command who caused the tranfer.
- (str) filename:
the absolutized name of the file on disk.
- (bool) receive:
True if the transfer was used for client uploading (STOR,
STOU, APPE), False otherwise (RETR).
- (bool) completed:
True if the file has been entirely sent, else False.
- (float) elapsed:
transfer elapsed time in seconds.
- (int) bytes:
number of bytes transmitted.
"""
line = '%s %s completed=%s bytes=%s seconds=%s' % \
(cmd, filename, completed and 1 or 0, bytes, elapsed)
self.log(line)
# --- connection
    def _make_eport(self, ip, port):
        """Establish an active data channel with remote client which
        issued a PORT or EPRT command.
        """
        # FTP bounce attacks protection: according to RFC-2577 it's
        # recommended to reject PORT if IP address specified in it
        # does not match client IP address.
        remote_ip = self.remote_ip
        if remote_ip.startswith('::ffff:'):
            # In this scenario, the server has an IPv6 socket, but
            # the remote client is using IPv4 and its address is
            # represented as an IPv4-mapped IPv6 address which
            # looks like this ::ffff:151.12.5.65, see:
            # http://en.wikipedia.org/wiki/IPv6#IPv4-mapped_addresses
            # http://tools.ietf.org/html/rfc3493.html#section-3.7
            # We truncate the first bytes to make it look like a
            # common IPv4 address.
            remote_ip = remote_ip[7:]
        if not self.permit_foreign_addresses and ip != remote_ip:
            msg = "501 Rejected data connection to foreign address %s:%s." \
                  % (ip, port)
            self.respond_w_warning(msg)
            return
        # ...another RFC-2577 recommendation is rejecting connections
        # to privileged ports (< 1024) for security reasons.
        if not self.permit_privileged_ports and port < 1024:
            msg = '501 PORT against the privileged port "%s" refused.' % port
            self.respond_w_warning(msg)
            return
        # close establishing DTP instances, if any
        self._shutdown_connecting_dtp()
        # close an already-established data connection too
        if self.data_channel is not None:
            self.data_channel.close()
            self.data_channel = None
        # make sure we are not hitting the max connections limit
        if not self.server._accept_new_cons():
            msg = "425 Too many connections. Can't open data channel."
            self.respond_w_warning(msg)
            return
        # open data channel
        self._dtp_connector = self.active_dtp(ip, port, self)
def _make_epasv(self, extmode=False):
"""Initialize a passive data channel with remote client which
issued a PASV or EPSV command.
If extmode argument is True we assume that client issued EPSV in
which case extended passive mode will be used (see RFC-2428).
"""
# close establishing DTP instances, if any
self._shutdown_connecting_dtp()
# close established data connections, if any
if self.data_channel is not None:
self.data_channel.close()
self.data_channel = None
# make sure we are not hitting the max connections limit
if not self.server._accept_new_cons():
msg = "425 Too many connections. Can't open data channel."
self.respond_w_warning(msg)
return
# open data channel
self._dtp_acceptor = self.passive_dtp(self, extmode)
def ftp_PORT(self, line):
"""Start an active data channel by using IPv4."""
if self._epsvall:
self.respond("501 PORT not allowed after EPSV ALL.")
return
# Parse PORT request for getting IP and PORT.
# Request comes in as:
# > h1,h2,h3,h4,p1,p2
# ...where the client's IP address is h1.h2.h3.h4 and the TCP
# port number is (p1 * 256) + p2.
try:
addr = list(map(int, line.split(',')))
if len(addr) != 6:
raise ValueError
for x in addr[:4]:
if not 0 <= x <= 255:
raise ValueError
ip = '%d.%d.%d.%d' % tuple(addr[:4])
port = (addr[4] * 256) + addr[5]
if not 0 <= port <= 65535:
raise ValueError
except (ValueError, OverflowError):
self.respond("501 Invalid PORT format.")
return
self._make_eport(ip, port)
    def ftp_EPRT(self, line):
        """Start an active data channel by choosing the network protocol
        to use (IPv4/IPv6) as defined in RFC-2428.
        """
        if self._epsvall:
            self.respond("501 EPRT not allowed after EPSV ALL.")
            return
        # Parse EPRT request for getting protocol, IP and PORT.
        # Request comes in as:
        # <d>proto<d>ip<d>port<d>
        # ...where <d> is an arbitrary delimiter character (usually "|") and
        # <proto> is the network protocol to use (1 for IPv4, 2 for IPv6).
        try:
            af, ip, port = line.split(line[0])[1:-1]
            port = int(port)
            if not 0 <= port <= 65535:
                raise ValueError
        except (ValueError, IndexError, OverflowError):
            self.respond("501 Invalid EPRT format.")
            return

        if af == "1":
            # test if AF_INET6 and IPV6_V6ONLY
            if (self.socket.family == socket.AF_INET6 and not
                    SUPPORTS_HYBRID_IPV6):
                self.respond('522 Network protocol not supported (use 2).')
            else:
                # validate the dotted-quad before connecting back
                try:
                    octs = list(map(int, ip.split('.')))
                    if len(octs) != 4:
                        raise ValueError
                    for x in octs:
                        if not 0 <= x <= 255:
                            raise ValueError
                except (ValueError, OverflowError):
                    self.respond("501 Invalid EPRT format.")
                else:
                    self._make_eport(ip, port)
        elif af == "2":
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_eport(ip, port)
        else:
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
def ftp_PASV(self, line):
"""Start a passive data channel by using IPv4."""
if self._epsvall:
self.respond("501 PASV not allowed after EPSV ALL.")
return
self._make_epasv(extmode=False)
    def ftp_EPSV(self, line):
        """Start a passive data channel by using IPv4 or IPv6 as defined
        in RFC-2428.
        """
        # RFC-2428 specifies that if an optional parameter is given,
        # we have to determine the address family from that otherwise
        # use the same address family used on the control connection.
        # In such a scenario a client may use IPv4 on the control channel
        # and choose to use IPv6 for the data channel.
        # But how could we use IPv6 on the data channel without knowing
        # which IPv6 address to use for binding the socket?
        # Unfortunately RFC-2428 does not provide satisfying information
        # on how to do that. The assumption is that we don't have any way
        # to know which address to use, hence we just use the same address
        # family used on the control connection.
        if not line:
            self._make_epasv(extmode=True)
        # IPv4
        elif line == "1":
            if self.socket.family != socket.AF_INET:
                self.respond('522 Network protocol not supported (use 2).')
            else:
                self._make_epasv(extmode=True)
        # IPv6
        elif line == "2":
            if self.socket.family == socket.AF_INET:
                self.respond('522 Network protocol not supported (use 1).')
            else:
                self._make_epasv(extmode=True)
        elif line.lower() == 'all':
            # after "EPSV ALL" any other port-setting command is refused
            self._epsvall = True
            self.respond(
                '220 Other commands other than EPSV are now disabled.')
        else:
            if self.socket.family == socket.AF_INET:
                self.respond('501 Unknown network protocol (use 1).')
            else:
                self.respond('501 Unknown network protocol (use 2).')
    def ftp_QUIT(self, line):
        """Quit the current session disconnecting the client."""
        if self.authenticated:
            msg_quit = self.authorizer.get_msg_quit(self.username)
        else:
            msg_quit = "Goodbye."
        # long goodbye messages require a multi-line 221 reply
        if len(msg_quit) <= 75:
            self.respond("221 %s" % msg_quit)
        else:
            self.push("221-%s\r\n" % msg_quit)
            self.respond("221 ")

        # From RFC-959:
        # If file transfer is in progress, the connection must remain
        # open for result response and the server will then close it.
        # We also stop responding to any further command.
        if self.data_channel:
            self._quit_pending = True
            self.del_channel()
        else:
            self._shutdown_connecting_dtp()
            self.close_when_done()
        if self.authenticated and self.username:
            self.on_logout(self.username)
# --- data transferring
def ftp_LIST(self, path):
"""Return a list of files in the specified directory to the
client.
On success return the directory path, else None.
"""
# - If no argument, fall back on cwd as default.
# - Some older FTP clients erroneously issue /bin/ls-like LIST
# formats in which case we fall back on cwd as default.
try:
iterator = self.run_as_current_user(self.fs.get_list_dir, path)
except (OSError, FilesystemError) as err:
why = _strerror(err)
self.respond('550 %s.' % why)
else:
producer = BufferedIteratorProducer(iterator)
self.push_dtp_data(producer, isproducer=True, cmd="LIST")
return path
    def ftp_NLST(self, path):
        """Return a list of files in the specified directory in a
        compact form to the client.
        On success return the directory path, else None.
        """
        try:
            if self.fs.isdir(path):
                listing = self.run_as_current_user(self.fs.listdir, path)
            else:
                # if path is a file we just list its name
                self.fs.lstat(path)  # raise exc in case of problems
                listing = [os.path.basename(path)]
        except (OSError, FilesystemError) as err:
            self.respond('550 %s.' % _strerror(err))
        else:
            data = ''
            if listing:
                try:
                    listing.sort()
                except UnicodeDecodeError:
                    # (Python 2 only) might happen on filesystem not
                    # supporting UTF8 meaning os.listdir() returned a list
                    # of mixed bytes and unicode strings:
                    # http://goo.gl/6DLHD
                    # http://bugs.python.org/issue683592
                    ls = []
                    for x in listing:
                        if not isinstance(x, unicode):
                            x = unicode(x, 'utf8')
                        ls.append(x)
                    listing = sorted(ls)
                data = '\r\n'.join(listing) + '\r\n'
            # the data channel expects raw bytes
            data = data.encode('utf8', self.unicode_errors)
            self.push_dtp_data(data, cmd="NLST")
            return path
# --- MLST and MLSD commands
# The MLST and MLSD commands are intended to standardize the file and
# directory information returned by the server-FTP process. These
# commands differ from the LIST command in that the format of the
# replies is strictly defined although extensible.
def ftp_MLST(self, path):
    """Return information about a pathname in a machine-processable
    form as defined in RFC-3659.
    On success return the path just listed, else None.
    """
    # ftp-style (virtual) path used in the reply text
    line = self.fs.fs2ftp(path)
    basedir, basename = os.path.split(path)
    perms = self.authorizer.get_perms(self.username)
    # RFC-3659 requires 501 response code if path not exist
    if not self.fs.lexists(path):
        self.respond("501 No such path %s." % path)
        return
    try:
        # NOTE(review): fs.infopath is assumed to return a
        # (size, last-modified-string) pair for the given path — its
        # contract is defined elsewhere; confirm against AbstractedFS.
        size, last_modified_str = self.run_as_current_user(self.fs.infopath, path)
        # format_mlsx yields byte chunks of MLSx fact lines for the
        # single entry; ignore_err=False so failures surface as
        # exceptions instead of being skipped
        iterator = self.run_as_current_user(
            self.fs.format_mlsx, basedir, [(basename, size, last_modified_str)], perms,
            self._current_facts, ignore_err=False)
        data = b''.join(iterator)
    except (OSError, FilesystemError) as err:
        self.respond('550 %s.' % _strerror(err))
    else:
        data = data.decode('utf8', self.unicode_errors)
        # since TVFS is supported (see RFC-3659 chapter 6), a fully
        # qualified pathname should be returned
        data = data.split(' ')[0] + ' %s\r\n' % line
        # response is expected on the command channel
        self.push('250-Listing "%s":\r\n' % line)
        # the fact set must be preceded by a space
        self.push(' ' + data)
        self.respond('250 End MLST.')
        return path
def ftp_MLSD(self, path):
    """Send the contents of a directory in a machine-processable form
    (RFC-3659) over the data channel.

    On success return the path just listed, else None.
    """
    # RFC-3659 requires 501 response code if path is not a directory
    if not self.fs.isdir(path):
        self.respond("501 No such directory.")
        return
    try:
        entries = self.run_as_current_user(self.fs.listdir, path)
    except (OSError, FilesystemError) as err:
        self.respond('550 %s.' % _strerror(err))
        return
    perms = self.authorizer.get_perms(self.username)
    facts_iter = self.fs.format_mlsx(path, entries, perms,
                                     self._current_facts)
    self.push_dtp_data(BufferedIteratorProducer(facts_iter),
                       isproducer=True, cmd="MLSD")
    return path
def ftp_RETR(self, file):
    """Retrieve the specified file (transfer from the server to the
    client). On success return the file path else None.
    """
    # consume any pending REST offset exactly once
    rest_pos = self._restart_position
    self._restart_position = 0
    try:
        fd = self.run_as_current_user(self.fs.open, file, 'rb')
    except (EnvironmentError, FilesystemError) as err:
        self.respond('550 %s.' % _strerror(err))
        return
    try:
        if rest_pos:
            # Make sure that the requested offset is valid (within the
            # size of the file being resumed).
            # According to RFC-1123 a 554 reply may result in case that
            # the existing file cannot be repositioned as specified in
            # the REST.
            failure = None
            try:
                if rest_pos > self.fs.getsize(file):
                    raise ValueError
                fd.seek(rest_pos)
            except ValueError:
                failure = "Invalid REST parameter"
            except (EnvironmentError, FilesystemError) as err:
                failure = _strerror(err)
            if failure is not None:
                fd.close()
                self.respond('554 %s' % failure)
                return
        # fd ownership passes to the producer / data channel from here
        producer = FileProducer(fd, self._current_type)
        self.push_dtp_data(producer, isproducer=True, file=fd, cmd="RETR")
        return file
    except Exception:
        fd.close()
        raise
def ftp_STOR(self, file, mode='w'):
    """Store a file (transfer from the client to the server).
    On success return the file path, else None.
    """
    # A resume could occur in case of APPE or REST commands.
    # In that case we have to open file object in different ways:
    # STOR: mode = 'w'
    # APPE: mode = 'a'
    # REST: mode = 'r+' (to permit seeking on file object)
    if 'a' in mode:
        cmd = 'APPE'
    else:
        cmd = 'STOR'
    # consume any pending REST offset exactly once
    rest_pos = self._restart_position
    self._restart_position = 0
    if rest_pos:
        # must be able to seek without truncating existing data
        mode = 'r+'
    try:
        # always open in binary; text translation is never wanted here
        fd = self.run_as_current_user(self.fs.open, file, mode + 'b')
    except (EnvironmentError, FilesystemError) as err:
        why = _strerror(err)
        self.respond('550 %s.' % why)
        return
    try:
        if rest_pos:
            # Make sure that the requested offset is valid (within the
            # size of the file being resumed).
            # According to RFC-1123 a 554 reply may result in case
            # that the existing file cannot be repositioned as
            # specified in the REST.
            ok = 0
            try:
                if rest_pos > self.fs.getsize(file):
                    raise ValueError
                fd.seek(rest_pos)
                ok = 1
            except ValueError:
                why = "Invalid REST parameter"
            except (EnvironmentError, FilesystemError) as err:
                why = _strerror(err)
            if not ok:
                fd.close()
                self.respond('554 %s' % why)
                return
        if self.data_channel is not None:
            # data connection already established: hand the fd over
            # and start receiving immediately (125 reply)
            resp = "Data connection already open. Transfer starting."
            self.respond("125 " + resp)
            self.data_channel.file_obj = fd
            self.data_channel.enable_receiving(self._current_type, cmd)
        else:
            # no data connection yet: queue (fd, cmd) so the transfer
            # starts as soon as the data channel comes up (150 reply)
            resp = "File status okay. About to open data connection."
            self.respond("150 " + resp)
            self._in_dtp_queue = (fd, cmd)
        return file
    except Exception:
        # never leak the open fd on an unexpected error
        fd.close()
        raise
def ftp_STOU(self, line):
"""Store a file on the server with a unique name.
On success return the file path, else None.
"""
# Note 1: RFC-959 prohibited STOU parameters, but this
# prohibition is obsolete.
# Note 2: 250 response wanted by RFC-959 has been declared
# incorrect in RFC-1123 that wants 125/150 instead.
# Note 3: RFC-1123 also provided an exact output format
# defined to be as follow:
# > 125 FILE: pppp
# ...where pppp represents the unique path name of the
# file that will be written.
# watch for STOU preceded by REST, which makes no sense.
if self._restart_position:
self.respond("450 Can't STOU while REST request is pending.")
return
if line:
basedir, prefix = os.path.split(self.fs.ftp2fs(line))
prefix = prefix + '.'
else:
basedir = self.fs.ftp2fs(self.fs.cwd)
prefix = 'ftpd.'
try:
fd = self.run_as_current_user(self.fs.mkstemp, prefix=prefix,
dir=basedir)
except (EnvironmentError, FilesystemError) as err:
# likely, we hit the max number of retries to find out a
# file with a unique name
if getattr(err, "errno", -1) == errno.EEXIST:
why = 'No usable unique file name found'
# something else happened
else:
why = _strerror(err)
self.respond("450 %s." % why)
return
try:
if not self.authorizer.has_perm(self.username, 'w', fd.name):
try:
fd.close()
self.run_as_current_user(self.fs.remove, fd.name)
except (OSError, FilesystemError):
pass
self.respond("550 Not enough privileges.")
return
# now just acts like STOR except that restarting isn't allowed
filename = os.path.basename(fd.name)
if self.data_channel is not None:
self.respond("125 FILE: %s" % filename)
self.data_channel.file_obj = fd