diff --git a/bin/_build.sh b/bin/_build.sh index bdeefaf86..02386a72a 100644 --- a/bin/_build.sh +++ b/bin/_build.sh @@ -168,6 +168,13 @@ init_build () { use_openssl="false" ;; esac; + else + case "$(uname -s)" in + Darwin) + # Needed to build OpenSSL 64-bit on OS X + export KERNEL_BITS=64 + ;; + esac fi; conditional_set requirements "${default_requirements}" @@ -490,7 +497,7 @@ c_dependencies () { if [ ${use_openssl} == "true" ]; then ruler; - local min_ssl_version="9470095"; # OpenSSL 0.9.8zf + local min_ssl_version="268443791"; # OpenSSL 1.0.2h local ssl_version="$(c_macro openssl/ssl.h OPENSSL_VERSION_NUMBER)"; if [ -z "${ssl_version}" ]; then ssl_version="0x0"; fi; @@ -499,13 +506,13 @@ c_dependencies () { if [ "${ssl_version}" -ge "${min_ssl_version}" ]; then using_system "OpenSSL"; else - local v="0.9.8zh"; + local v="1.0.2h"; local n="openssl"; local p="${n}-${v}"; # use 'config' instead of 'configure'; 'make' instead of 'jmake'. # also pass 'shared' to config to build shared libs. - c_dependency -c "config" -s "3ff71636bea85a99f4d76a10d119c09bda0421e3" \ + c_dependency -c "config" -s "577585f5f5d299c44dd3c993d3c0ac7a219e4949" \ -p "make depend" -b "make" \ "openssl" "${p}" \ "http://www.openssl.org/source/${p}.tar.gz" "shared"; diff --git a/calendarserver/tap/caldav.py b/calendarserver/tap/caldav.py index 2fae5a169..b89aba7a5 100644 --- a/calendarserver/tap/caldav.py +++ b/calendarserver/tap/caldav.py @@ -245,6 +245,7 @@ def __init__(self, logEnabled, logPath, logRotateLength, logMaxFiles, logRotateO self.logRotateLength = logRotateLength self.logMaxFiles = logMaxFiles self.logRotateOnStart = logRotateOnStart + self.name = "elms" def setServiceParent(self, app): @@ -280,7 +281,7 @@ class CalDAVService (ErrorLoggingMultiService): # The ConnectionService is a MultiService which bundles all the connection # services together for the purposes of being able to stop them and wait # for all of their connections to close before shutting down. 
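The min_ssl_version bump in the _build.sh hunk above is just the decimal form of OpenSSL's packed OPENSSL_VERSION_NUMBER (0xMNNFFPPS: major, minor, fix, patch letter, status). A quick standalone check of the 1.0.2h value, not part of the patch:

# OpenSSL 1.0.2h: major 1, minor 00, fix 02, patch 'h' (8th letter) -> 0x08,
# status 0xf (release), i.e. 0x1000208f.
min_ssl_version = 0x1000208f
print(min_ssl_version)   # 268443791, the constant compared against the c_macro output
print(hex(268443791))    # '0x1000208f'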
- connectionServiceName = "ConnectionService" + connectionServiceName = "cs" def __init__(self, logObserver): self.logObserver = logObserver # accesslog observer @@ -292,6 +293,7 @@ def __init__(self, logObserver): config.ErrorLogMaxRotatedFiles, config.ErrorLogRotateOnStart, ) + self.name = "cds" def privilegedStartService(self): @@ -862,6 +864,11 @@ def location(frame): frame=frame ) + if config.Manhole.Enabled: + namespace= dict({service.name: service}) + for n, s in service.namedServices.iteritems(): + namespace[n] = s + self._makeManhole(namespace=namespace, parent=service) return service @@ -898,6 +905,7 @@ def makeService_Slave(self, options): result = self.requestProcessingService(options, store, logObserver) if pool is not None: + pool.setName("db") pool.setServiceParent(result) if config.ControlSocket: @@ -937,6 +945,7 @@ def queueMasterAvailable(connectionFromMaster): controlClient = ControlSocketConnectingService( endpointFactory, controlSocketClient ) + controlClient.setName("control") controlClient.setServiceParent(result) # Optionally set up push notifications @@ -947,6 +956,7 @@ def queueMasterAvailable(connectionFromMaster): pushSubService = ApplePushNotifierService.makeService( config.Notifications.Services.APNS, store) observers.append(pushSubService) + pushSubService.setName("APNS") pushSubService.setServiceParent(result) if config.Notifications.Services.AMP.Enabled: pushSubService = AMPPushForwarder(controlSocketClient) @@ -959,6 +969,7 @@ def queueMasterAvailable(connectionFromMaster): mailRetriever = MailRetriever( store, directory, config.Scheduling.iMIP.Receiving ) + mailRetriever.setName("MailRetriever") mailRetriever.setServiceParent(result) else: mailRetriever = None @@ -988,37 +999,6 @@ def decorateTransaction(txn): txn._groupCacher = groupCacher store.callWithNewTransactions(decorateTransaction) - - # Optionally enable Manhole access - if config.Manhole.Enabled: - try: - from twisted.conch.manhole_tap import ( - makeService as manholeMakeService - ) - portString = "tcp:{:d}:interface=127.0.0.1".format( - config.Manhole.StartingPortNumber + int(config.LogID) + 1 - ) - manholeService = manholeMakeService({ - "sshPort": None, - "telnetPort": portString, - "namespace": { - "config": config, - "service": result, - "store": store, - "directory": directory, - }, - "passwd": config.Manhole.PasswordFilePath, - }) - manholeService.setServiceParent(result) - # Using print(because logging isn't ready at this point) - print("Manhole access enabled:", portString) - - except ImportError: - print( - "Manhole access could not enabled because " - "manhole_tap could not be imported" - ) - return result @@ -1181,10 +1161,12 @@ def updateFactory(configDict, reloading=False): # 'SSL' tag on it, since that's the only time it's used. 
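The setName() calls added throughout this file are what feed the manhole namespace assembled above: naming a child service registers it in its parent's namedServices mapping. A minimal standalone sketch of that Twisted behaviour (toy service names, not from the patch):

from twisted.application.service import MultiService, Service

parent = MultiService()
child = Service()
child.setName("db")               # must be called before setServiceParent()
child.setServiceParent(parent)

assert parent.getServiceNamed("db") is child
assert "db" in parent.namedServices   # the dict iterated when building the manhole namespace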
contextFactory = None - ReportingHTTPService( + reportingService = ReportingHTTPService( requestFactory, int(config.MetaFD), contextFactory, - usingSocketFile=config.SocketFiles.Enabled - ).setServiceParent(connectionService) + usingSocketFile=config.SocketFiles.Enabled, + ) + reportingService.setName("http-{}".format(int(config.MetaFD))) + reportingService.setServiceParent(connectionService) else: # Not inheriting, therefore we open our own: for bindAddress in self._allBindAddresses(): @@ -1211,6 +1193,8 @@ def updateFactory(configDict, reloading=False): backlog=config.ListenBacklog, inherit=False ) + httpsService.setName( + "https-{}:{}".format(bindAddress,int(port))) httpsService.setServiceParent(connectionService) for port in config.BindHTTPPorts: @@ -1311,6 +1295,59 @@ def _spawnMemcached(self, monitor=None): Popen(memcachedArgv) + def _makeManhole(self, namespace=None, parent=None): + try: + import inspect + import objgraph + except ImportError: + pass + try: + if 'inspect' in locals(): + namespace['ins'] = inspect + if 'objgraph' in locals(): + namespace['og'] = objgraph + from pprint import pprint + namespace.update({ + 'pp': pprint, + 'cfg': config, + }) + from twisted.conch.manhole_tap import ( + makeService as manholeMakeService + ) + portOffset = 0 if config.LogID == '' else int(config.LogID) + 1 + portString = "tcp:{:d}:interface=127.0.0.1".format( + config.Manhole.StartingPortNumber + portOffset + ) + manholeService = manholeMakeService({ + "passwd": config.Manhole.PasswordFilePath, + "telnetPort": + portString if config.Manhole.UseSSH is False else None, + "sshPort": + portString if config.Manhole.UseSSH is True else None, + "sshKeyDir": config.DataRoot, + "sshKeyName": config.Manhole.sshKeyName, + "sshKeySize": config.Manhole.sshKeySize, + "namespace": namespace, + }) + manholeService.setName("manhole") + if parent is not None: + manholeService.setServiceParent(parent) + # Using print(because logging isn't ready at this point) + print("Manhole access enabled:", portString) + except ImportError: + print( + "Manhole access could not enabled because " + "manhole_tap could not be imported." + ) + import platform + if platform.system() == "Darwin": + if config.Manhole.UseSSH: + print( + "Set Manhole.UseSSH to false or rebuild CS with the " + "USE_OPENSSL environment variable set." 
+ ) + + def makeService_Single(self, options): """ Create a service to be used in a single-process, stand-alone @@ -1332,6 +1369,7 @@ def slaveSvcCreator(pool, store, logObserver, storageService): config.Notifications.Services.APNS, store ) observers.append(pushSubService) + pushSubService.setName("APNS") pushSubService.setServiceParent(result) if config.Notifications.Services.AMP.Enabled: pushSubService = AMPPushMaster( @@ -1362,6 +1400,7 @@ def slaveSvcCreator(pool, store, logObserver, storageService): mailRetriever = MailRetriever( store, directory, config.Scheduling.iMIP.Receiving ) + mailRetriever.setName("mailRetriever") mailRetriever.setServiceParent(result) else: mailRetriever = None @@ -1401,35 +1440,6 @@ def slaveSvcCreator(pool, store, logObserver, storageService): else: groupCacher = None - # Optionally enable Manhole access - if config.Manhole.Enabled: - try: - from twisted.conch.manhole_tap import ( - makeService as manholeMakeService - ) - portString = "tcp:{:d}:interface=127.0.0.1".format( - config.Manhole.StartingPortNumber - ) - manholeService = manholeMakeService({ - "sshPort": None, - "telnetPort": portString, - "namespace": { - "config": config, - "service": result, - "store": store, - "directory": directory, - }, - "passwd": config.Manhole.PasswordFilePath, - }) - manholeService.setServiceParent(result) - # Using print(because logging isn't ready at this point) - print("Manhole access enabled:", portString) - except ImportError: - print( - "Manhole access could not enabled because " - "manhole_tap could not be imported" - ) - def decorateTransaction(txn): txn._pushDistributor = pushDistributor txn._rootResource = result.rootResource @@ -1526,6 +1536,7 @@ def agentServiceCreator(pool, store, ignored, storageService): config.ErrorLogMaxRotatedFiles, config.ErrorLogRotateOnStart, ) + svc.setName("agent") svc.setServiceParent(agentLoggingService) return agentLoggingService @@ -1582,6 +1593,7 @@ def subServiceFactory(connectionFactory, storageService): dbtype=DatabaseType(dialect, paramstyle, dbfeatures), maxConnections=config.MaxDBConnectionsPerPool ) + cp.setName("db") cp.setServiceParent(ms) store = storeFromConfigWithoutDPS(config, cp.connection) @@ -1655,6 +1667,7 @@ def subServiceFactory(connectionFactory, storageService): UpgradeReleaseLockStep(store) ) + pps.setName("pre") pps.setServiceParent(ms) return ms @@ -1784,6 +1797,7 @@ def makeService_Combined(self, options): monitor = DelayedStartupProcessMonitor() s.processMonitor = monitor + monitor.setName("pm") monitor.setServiceParent(s) if config.MemoryLimiter.Enabled: @@ -1791,6 +1805,7 @@ def makeService_Combined(self, options): monitor, config.MemoryLimiter.Seconds, config.MemoryLimiter.Bytes, config.MemoryLimiter.ResidentOnly ) + memoryLimiter.setName("ml") memoryLimiter.setServiceParent(s) # Maybe spawn memcached through a ProcessMonitor @@ -1877,32 +1892,6 @@ def _openSocket(addr, port): statsService.setName("tcp-stats") statsService.setServiceParent(s) - # Optionally enable Manhole access - if config.Manhole.Enabled: - try: - from twisted.conch.manhole_tap import ( - makeService as manholeMakeService - ) - portString = "tcp:{:d}:interface=127.0.0.1".format( - config.Manhole.StartingPortNumber - ) - manholeService = manholeMakeService({ - "sshPort": None, - "telnetPort": portString, - "namespace": { - "config": config, - "service": s, - }, - "passwd": config.Manhole.PasswordFilePath, - }) - manholeService.setServiceParent(s) - # Using print(because logging isn't ready at this point) - print("Manhole access 
enabled:", portString) - except ImportError: - print( - "Manhole access could not enabled because " - "manhole_tap could not be imported" - ) # Finally, let's get the real show on the road. Create a service that @@ -1944,11 +1933,13 @@ def spawnerSvcCreator(pool, store, ignored, storageService): else: dispenser = None multi = MultiService() + multi.setName("multi") pool.setServiceParent(multi) spawner = SlaveSpawnerService( self, monitor, dispenser, dispatcher, stats, options["config"], inheritFDs=inheritFDs, inheritSSLFDs=inheritSSLFDs ) + spawner.setName("spawner") spawner.setServiceParent(multi) if config.UseMetaFD: cl.setServiceParent(multi) @@ -1961,6 +1952,7 @@ def spawnerSvcCreator(pool, store, ignored, storageService): mailRetriever = MailRetriever( store, directory, config.Scheduling.iMIP.Receiving ) + mailRetriever.setName("MailRetriever") mailRetriever.setServiceParent(multi) else: mailRetriever = None @@ -1993,6 +1985,7 @@ def decorateTransaction(txn): ssvc = self.storageService( spawnerSvcCreator, None, uid, gid ) + ssvc.setName("ssvc") ssvc.setServiceParent(s) return s diff --git a/calendarserver/tools/gateway.py b/calendarserver/tools/gateway.py index b5a543344..f1cda1b12 100755 --- a/calendarserver/tools/gateway.py +++ b/calendarserver/tools/gateway.py @@ -35,16 +35,14 @@ WRITABLE_CONFIG_KEYS, setKeyPath, getKeyPath, flattenDictionary, WritableConfig ) -from calendarserver.tools.principals import ( - getProxies, setProxies -) from calendarserver.tools.purge import ( WorkerService, PurgeOldEventsService, DEFAULT_BATCH_SIZE, DEFAULT_RETAIN_DAYS, PrincipalPurgeWork ) from calendarserver.tools.util import ( - recordForPrincipalID, autoDisableMemcached + recordForPrincipalID, autoDisableMemcached, + getProxies, setProxies ) from pycalendar.datetime import DateTime from twext.who.directory import DirectoryRecord diff --git a/calendarserver/tools/principals.py b/calendarserver/tools/principals.py index b99925663..8e1f07b44 100755 --- a/calendarserver/tools/principals.py +++ b/calendarserver/tools/principals.py @@ -32,7 +32,7 @@ from calendarserver.tools.cmdline import utilityMain, WorkerService from calendarserver.tools.util import ( - recordForPrincipalID, prettyRecord + recordForPrincipalID, prettyRecord, action_addProxy, action_removeProxy ) from twext.who.directory import DirectoryRecord from twext.who.idirectory import RecordType, InvalidDirectoryRecordError @@ -40,8 +40,7 @@ from twisted.internet.defer import inlineCallbacks, returnValue, succeed from twistedcaldav.config import config from twistedcaldav.cache import MemcacheChangeNotifier -from txdav.who.delegates import Delegates, RecordType as DelegateRecordType, \ - CachingDelegates +from txdav.who.delegates import CachingDelegates from txdav.who.idirectory import AutoScheduleMode from txdav.who.groups import GroupCacherPollingWork @@ -604,104 +603,7 @@ def action_listProxyFor(store, record, *proxyTypes): -@inlineCallbacks -def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs): - directory = store.directoryService() - readWrite = (proxyType == "write") - for proxyID in proxyIDs: - proxyRecord = yield recordForPrincipalID(directory, proxyID) - if proxyRecord is None: - print("Invalid principal ID: %s" % (proxyID,)) - else: - txn = store.newTransaction() - yield fn(txn, record, proxyRecord, readWrite) - yield txn.commit() - print( - "{msg} {proxy} as a {proxyType} proxy for {record}".format( - msg=msg, proxy=prettyRecord(proxyRecord), - proxyType=proxyType, record=prettyRecord(record) - ) - ) - - - 
-@inlineCallbacks -def action_addProxy(store, record, proxyType, *proxyIDs): - if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates: - if record.recordType in ( - record.service.recordType.location, - record.service.recordType.resource, - ): - print("You are not allowed to add proxies for locations or resources via command line when their proxy assignments come from the directory service.") - returnValue(None) - yield _addRemoveProxy("Added", Delegates.addDelegate, store, record, proxyType, *proxyIDs) - - - -@inlineCallbacks -def action_removeProxy(store, record, *proxyIDs): - if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates: - if record.recordType in ( - record.service.recordType.location, - record.service.recordType.resource, - ): - print("You are not allowed to remove proxies for locations or resources via command line when their proxy assignments come from the directory service.") - returnValue(None) - - # Write - yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "write", *proxyIDs) - # Read - yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "read", *proxyIDs) - - - -@inlineCallbacks -def setProxies(record, readProxyRecords, writeProxyRecords): - """ - Set read/write proxies en masse for a record - @param record: L{IDirectoryRecord} - @param readProxyRecords: a list of records - @param writeProxyRecords: a list of records - """ - - proxyTypes = [ - (DelegateRecordType.readDelegateGroup, readProxyRecords), - (DelegateRecordType.writeDelegateGroup, writeProxyRecords), - ] - for recordType, proxyRecords in proxyTypes: - if proxyRecords is None: - continue - proxyGroup = yield record.service.recordWithShortName( - recordType, record.uid - ) - yield proxyGroup.setMembers(proxyRecords) - - - -@inlineCallbacks -def getProxies(record): - """ - Returns a tuple containing the records for read proxies and write proxies - of the given record - """ - - allProxies = { - DelegateRecordType.readDelegateGroup: [], - DelegateRecordType.writeDelegateGroup: [], - } - for recordType in allProxies.iterkeys(): - proxyGroup = yield record.service.recordWithShortName( - recordType, record.uid - ) - allProxies[recordType] = yield proxyGroup.members() - - returnValue( - ( - allProxies[DelegateRecordType.readDelegateGroup], - allProxies[DelegateRecordType.writeDelegateGroup] - ) - ) @@ -871,14 +773,7 @@ def action_setAutoScheduleMode(store, record, autoScheduleMode): ) ) - # Get original fields - newFields = record.fields.copy() - - # Set new values - newFields[record.service.fieldName.autoScheduleMode] = autoScheduleMode - - updatedRecord = DirectoryRecord(record.service, newFields) - yield record.service.updateRecords([updatedRecord], create=False) + yield record.setAutoScheduleMode(autoScheduleMode) diff --git a/calendarserver/tools/test/test_principals.py b/calendarserver/tools/test/test_principals.py index c83f0a86c..290eec449 100644 --- a/calendarserver/tools/test/test_principals.py +++ b/calendarserver/tools/test/test_principals.py @@ -19,7 +19,10 @@ from twistedcaldav.stdconfig import config from calendarserver.tools.principals import ( parseCreationArgs, matchStrings, - recordForPrincipalID, getProxies, setProxies + recordForPrincipalID +) +from calendarserver.tools.util import ( + getProxies, setProxies ) from twext.python.filepath import CachingFilePath as FilePath from twisted.internet import reactor diff --git a/calendarserver/tools/util.py b/calendarserver/tools/util.py index 
10ad6f2be..d045c3aca 100644 --- a/calendarserver/tools/util.py +++ b/calendarserver/tools/util.py @@ -30,7 +30,6 @@ import socket from pwd import getpwnam from grp import getgrnam -from uuid import UUID from calendarserver.tools import diagnose @@ -41,9 +40,6 @@ from twext.python.log import Logger from twisted.internet.defer import inlineCallbacks, returnValue -from txdav.xml import element as davxml - - from twistedcaldav import memcachepool from txdav.base.propertystore.base import PropertyName from txdav.xml import element @@ -52,6 +48,8 @@ from twext.who.idirectory import RecordType from txdav.who.idirectory import RecordType as CalRecordType +from txdav.who.delegates import Delegates, RecordType as DelegateRecordType + log = Logger() @@ -272,8 +270,6 @@ def principalForPrincipalID(principalID, checkOnly=False, directory=None): returnValue((yield directory.principalCollection.principalForShortName(recordType, shortName))) try: - UUID(principalID) - if checkOnly: returnValue(None) @@ -343,126 +339,107 @@ def recordForPrincipalID(directory, principalID, checkOnly=False): raise ValueError("Invalid principal identifier: %s" % (principalID,)) - -def proxySubprincipal(principal, proxyType): - return principal.getChild("calendar-proxy-" + proxyType) +@inlineCallbacks +def _addRemoveProxy(msg, fn, store, record, proxyType, *proxyIDs): + directory = store.directoryService() + readWrite = (proxyType == "write") + for proxyID in proxyIDs: + proxyRecord = yield recordForPrincipalID(directory, proxyID) + if proxyRecord is None: + print("Invalid principal ID: %s" % (proxyID,)) + else: + txn = store.newTransaction() + yield fn(txn, record, proxyRecord, readWrite) + yield txn.commit() + print( + "{msg} {proxy} as a {proxyType} proxy for {record}".format( + msg=msg, proxy=prettyRecord(proxyRecord), + proxyType=proxyType, record=prettyRecord(record) + ) + ) @inlineCallbacks -def action_addProxyPrincipal(rootResource, directory, store, principal, proxyType, proxyPrincipal): - try: - (yield addProxy(rootResource, directory, store, principal, proxyType, proxyPrincipal)) - print("Added %s as a %s proxy for %s" % ( - prettyPrincipal(proxyPrincipal), proxyType, - prettyPrincipal(principal))) - except ProxyError, e: - print("Error:", e) - except ProxyWarning, e: - print(e) +def action_addProxy(store, record, proxyType, *proxyIDs): + if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates: + if record.recordType in ( + record.service.recordType.location, + record.service.recordType.resource, + ): + print("You are not allowed to add proxies for locations or resources via command line when their proxy assignments come from the directory service.") + returnValue(None) + + yield _addRemoveProxy("Added", Delegates.addDelegate, store, record, proxyType, *proxyIDs) @inlineCallbacks -def action_removeProxyPrincipal(rootResource, directory, store, principal, proxyPrincipal, **kwargs): - try: - removed = (yield removeProxy( - rootResource, directory, store, - principal, proxyPrincipal, **kwargs - )) - if removed: - print("Removed %s as a proxy for %s" % ( - prettyPrincipal(proxyPrincipal), - prettyPrincipal(principal))) - except ProxyError, e: - print("Error:", e) - except ProxyWarning, e: - print(e) +def action_removeProxy(store, record, *proxyIDs): + if config.GroupCaching.Enabled and config.GroupCaching.UseDirectoryBasedDelegates: + if record.recordType in ( + record.service.recordType.location, + record.service.recordType.resource, + ): + print("You are not allowed to remove proxies for 
locations or resources via command line when their proxy assignments come from the directory service.") + returnValue(None) + # Write + yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "write", *proxyIDs) + # Read + yield _addRemoveProxy("Removed", Delegates.removeDelegate, store, record, "read", *proxyIDs) @inlineCallbacks -def addProxy(rootResource, directory, store, principal, proxyType, proxyPrincipal): - proxyURL = proxyPrincipal.url() - - subPrincipal = yield proxySubprincipal(principal, proxyType) - if subPrincipal is None: - raise ProxyError( - "Unable to edit %s proxies for %s\n" % ( - proxyType, - prettyPrincipal(principal) - ) +def setProxies(record, readProxyRecords, writeProxyRecords): + """ + Set read/write proxies en masse for a record + @param record: L{IDirectoryRecord} + @param readProxyRecords: a list of records + @param writeProxyRecords: a list of records + """ + + proxyTypes = [ + (DelegateRecordType.readDelegateGroup, readProxyRecords), + (DelegateRecordType.writeDelegateGroup, writeProxyRecords), + ] + for recordType, proxyRecords in proxyTypes: + if proxyRecords is None: + continue + proxyGroup = yield record.service.recordWithShortName( + recordType, record.uid ) + yield proxyGroup.setMembers(proxyRecords) - membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None)) - for memberURL in membersProperty.children: - if str(memberURL) == proxyURL: - raise ProxyWarning("%s is already a %s proxy for %s" % ( - prettyPrincipal(proxyPrincipal), proxyType, - prettyPrincipal(principal))) - else: - memberURLs = list(membersProperty.children) - memberURLs.append(davxml.HRef(proxyURL)) - membersProperty = davxml.GroupMemberSet(*memberURLs) - (yield subPrincipal.writeProperty(membersProperty, None)) +@inlineCallbacks +def getProxies(record): + """ + Returns a tuple containing the records for read proxies and write proxies + of the given record + """ - proxyTypes = ["read", "write"] - proxyTypes.remove(proxyType) + allProxies = { + DelegateRecordType.readDelegateGroup: [], + DelegateRecordType.writeDelegateGroup: [], + } + for recordType in allProxies.iterkeys(): + proxyGroup = yield record.service.recordWithShortName( + recordType, record.uid + ) + allProxies[recordType] = yield proxyGroup.members() - yield action_removeProxyPrincipal( - rootResource, directory, store, - principal, proxyPrincipal, proxyTypes=proxyTypes + returnValue( + ( + allProxies[DelegateRecordType.readDelegateGroup], + allProxies[DelegateRecordType.writeDelegateGroup] + ) ) - # Schedule work the PeerConnectionPool will pick up as overdue - def groupPollNow(txn): - from txdav.who.groups import GroupCacherPollingWork - return GroupCacherPollingWork.reschedule(txn, 0, force=True) - yield store.inTransaction("addProxy groupPollNow", groupPollNow) - - -@inlineCallbacks -def removeProxy(rootResource, directory, store, principal, proxyPrincipal, **kwargs): - removed = False - proxyTypes = kwargs.get("proxyTypes", ("read", "write")) - for proxyType in proxyTypes: - proxyURL = proxyPrincipal.url() - - subPrincipal = yield proxySubprincipal(principal, proxyType) - if subPrincipal is None: - raise ProxyError( - "Unable to edit %s proxies for %s\n" % ( - proxyType, - prettyPrincipal(principal) - ) - ) - - membersProperty = (yield subPrincipal.readProperty(davxml.GroupMemberSet, None)) - - memberURLs = [ - m for m in membersProperty.children - if str(m) != proxyURL - ] - - if len(memberURLs) == len(membersProperty.children): - # No change - continue - else: - removed = True 
- - membersProperty = davxml.GroupMemberSet(*memberURLs) - (yield subPrincipal.writeProperty(membersProperty, None)) - - if removed: - # Schedule work the PeerConnectionPool will pick up as overdue - def groupPollNow(txn): - from txdav.who.groups import GroupCacherPollingWork - return GroupCacherPollingWork.reschedule(txn, 0, force=True) - yield store.inTransaction("removeProxy groupPollNow", groupPollNow) - returnValue(removed) +def proxySubprincipal(principal, proxyType): + return principal.getChild("calendar-proxy-" + proxyType) diff --git a/calendarserver/webadmin/delegation.py b/calendarserver/webadmin/delegation.py index 350e85849..38fa80bfd 100644 --- a/calendarserver/webadmin/delegation.py +++ b/calendarserver/webadmin/delegation.py @@ -27,8 +27,8 @@ import urlparse from calendarserver.tools.util import ( - principalForPrincipalID, proxySubprincipal, action_addProxyPrincipal, - action_removeProxyPrincipal + recordForPrincipalID, proxySubprincipal, action_addProxy, + action_removeProxy, principalForPrincipalID ) from twistedcaldav.config import config @@ -49,6 +49,15 @@ from twext.who.idirectory import RecordType from txdav.who.idirectory import RecordType as CalRecordType, AutoScheduleMode +allowedAutoScheduleModes = { + "default": None, + "none": AutoScheduleMode.none, + "accept-always": AutoScheduleMode.accept, + "decline-always": AutoScheduleMode.decline, + "accept-if-free": AutoScheduleMode.acceptIfFree, + "decline-if-busy": AutoScheduleMode.declineIfBusy, + "automatic": AutoScheduleMode.acceptIfFreeDeclineIfBusy, +} class WebAdminPage(Element): """ @@ -221,8 +230,8 @@ def __init__(self, resourceId, principalResource, davPropertyName, self.principalResource = principalResource self.adminResource = adminResource self.proxySearch = proxySearch - record = principalResource.record - tag.fillSlots(resourceTitle=recordTitle(record), + self.record = principalResource.record + tag.fillSlots(resourceTitle=recordTitle(self.record), resourceId=resourceId, davPropertyName=davPropertyName, proxySearch=proxySearch) @@ -283,9 +292,9 @@ def autoSchedule(self, request, tag): Renderer which elides its tag for non-resource-type principals. """ if ( - self.principalResource.record.recordType.description != "user" and - self.principalResource.record.recordType.description != "group" or - self.principalResource.record.recordType.description == "user" and + self.record.recordType.description != "user" and + self.record.recordType.description != "group" or + self.record.recordType.description == "user" and config.Scheduling.Options.AutoSchedule.AllowUsers ): return tag @@ -293,99 +302,91 @@ def autoSchedule(self, request, tag): @renderer - @inlineCallbacks def isAutoSchedule(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag if the resource is auto-schedule. """ - if (yield self.principalResource.getAutoScheduleMode()) is not AutoScheduleMode.none: + if self.record.autoScheduleMode is not AutoScheduleMode.none: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def isntAutoSchedule(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag if the resource is not auto-schedule. 
""" - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.none: + if self.record.autoScheduleMode is AutoScheduleMode.none: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeNone(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.none: + if self.record.autoScheduleMode is AutoScheduleMode.none: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeAcceptAlways(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.accept: + if self.record.autoScheduleMode is AutoScheduleMode.accept: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeDeclineAlways(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.decline: + if self.record.autoScheduleMode is AutoScheduleMode.decline: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeAcceptIfFree(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.acceptIfFree: + if self.record.autoScheduleMode is AutoScheduleMode.acceptIfFree: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeDeclineIfBusy(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.declineIfBusy: + if self.record.autoScheduleMode is AutoScheduleMode.declineIfBusy: tag(selected='selected') - returnValue(tag) + return tag @renderer - @inlineCallbacks def autoScheduleModeAutomatic(self, request, tag): """ Renderer which sets the 'selected' attribute on its tag based on the resource auto-schedule-mode. """ - if (yield self.principalResource.getAutoScheduleMode()) is AutoScheduleMode.acceptIfFreeDeclineIfBusy: + if self.record.autoScheduleMode is AutoScheduleMode.acceptIfFreeDeclineIfBusy: tag(selected='selected') - returnValue(tag) + return tag _matrix = None @@ -634,9 +635,9 @@ def createSimilarFile(self, path): @inlineCallbacks - def resourceActions(self, request, principal): + def resourceActions(self, request, record): """ - Take all actions on the given principal based on the given request. + Take all actions on the given record based on the given request. """ def queryValue(arg): @@ -651,45 +652,32 @@ def queryValues(arg): matches.append(key[len(arg):]) return matches - autoSchedule = queryValue("autoSchedule") autoScheduleMode = queryValue("autoScheduleMode") makeReadProxies = queryValues("mkReadProxy|") makeWriteProxies = queryValues("mkWriteProxy|") removeProxies = queryValues("rmProxy|") - # Update the auto-schedule value if specified. - if autoSchedule is not None and (autoSchedule == "true" or - autoSchedule == "false"): + # Update the auto-schedule-mode value if specified. 
+ if autoScheduleMode: if ( - principal.record.recordType != RecordType.user and - principal.record.recordType != RecordType.group or - principal.record.recordType == RecordType.user and + record.recordType != RecordType.user and + record.recordType != RecordType.group or + record.recordType == RecordType.user and config.Scheduling.Options.AutoSchedule.AllowUsers ): - (yield principal.setAutoSchedule(autoSchedule == "true")) - (yield principal.setAutoScheduleMode(autoScheduleMode)) + autoScheduleMode = allowedAutoScheduleModes[autoScheduleMode] + yield record.setAutoScheduleMode(autoScheduleMode) + record.autoScheduleMode = autoScheduleMode # Update the proxies if specified. - for proxyId in removeProxies: - proxy = yield self.getResourceById(request, proxyId) - yield action_removeProxyPrincipal( - self.root, self.directory, self.store, - principal, proxy, proxyTypes=["read", "write"] - ) + if removeProxies: + yield action_removeProxy(self.store, record, *removeProxies) - for proxyId in makeReadProxies: - proxy = yield self.getResourceById(request, proxyId) - yield action_addProxyPrincipal( - self.root, self.directory, self.store, - principal, "read", proxy - ) + if makeReadProxies: + yield action_addProxy(self.store, record, "read", *makeReadProxies) - for proxyId in makeWriteProxies: - proxy = yield self.getResourceById(request, proxyId) - yield action_addProxyPrincipal( - self.root, self.directory, self.store, - principal, "write", proxy - ) + if makeWriteProxies: + yield action_addProxy(self.store, record, "write", *makeWriteProxies) @inlineCallbacks @@ -700,8 +688,8 @@ def render(self, request): """ resourceId = request.args.get('resourceId', [''])[0] if resourceId: - principal = yield self.getResourceById(request, resourceId) - yield self.resourceActions(request, principal) + record = yield recordForPrincipalID(self.directory, resourceId) + yield self.resourceActions(request, record) htmlContent = yield flattenString(request, WebAdminPage(self)) response = Response() response.stream = MemoryStream(htmlContent) diff --git a/conf/caldavd-stdconfig.plist b/conf/caldavd-stdconfig.plist index 340b99abd..7258e8216 100644 --- a/conf/caldavd-stdconfig.plist +++ b/conf/caldavd-stdconfig.plist @@ -2123,14 +2123,28 @@ Enabled + + UseSSH + + + StartingPortNumber 5000 + DPSPortNumber 4999 + PasswordFilePath + + + sshKeyName + manhole.key + + sshKeySize + 4096 EnableKeepAlive diff --git a/twistedcaldav/stdconfig.py b/twistedcaldav/stdconfig.py index e0815463a..0c165b1e5 100644 --- a/twistedcaldav/stdconfig.py +++ b/twistedcaldav/stdconfig.py @@ -1108,9 +1108,12 @@ "Manhole": { "Enabled": False, - "StartingPortNumber": 5000, - "DPSPortNumber": 4999, - "PasswordFilePath": "", + "UseSSH": True, # Set to False for telnet + "StartingPortNumber": 5000, # Master listens here, children increment + "DPSPortNumber": 4999, # Directory Proxy listens here + "PasswordFilePath": "", # Path to password file with lines of user:pass + "sshKeyName": "manhole.key", # Relative to DataRoot + "sshKeySize": 4096, }, "EnableKeepAlive": False, diff --git a/txdav/caldav/icalendardirectoryservice.py b/txdav/caldav/icalendardirectoryservice.py index e98798086..e0438e753 100644 --- a/txdav/caldav/icalendardirectoryservice.py +++ b/txdav/caldav/icalendardirectoryservice.py @@ -106,6 +106,15 @@ def getAutoScheduleMode(organizer): #@NoSelf @rtype: C{bool} """ + def setAutoScheduleMode(autoScheduleMode): #@NoSelf + """ + Sets the mode of automatic scheduling used for this record. 
+ + @param autoScheduleMode: the new mode + @type autoScheduleMode: L{AutoScheduleMode} + """ + + def isProxyFor(other): #@NoSelf """ Test whether the record is a calendar user proxy for the specified record. diff --git a/txdav/dps/client.py b/txdav/dps/client.py index 62842ccb3..ae5c5e47c 100644 --- a/txdav/dps/client.py +++ b/txdav/dps/client.py @@ -44,7 +44,8 @@ WikiAccessForUIDCommand, ContinuationCommand, StatsCommand, ExternalDelegatesCommand, ExpandedMemberUIDsCommand, AddMembersCommand, RemoveMembersCommand, - UpdateRecordsCommand, ExpandedMembersCommand, FlushCommand + UpdateRecordsCommand, ExpandedMembersCommand, FlushCommand, + SetAutoScheduleModeCommand ) from txdav.who.delegates import RecordType as DelegatesRecordType from txdav.who.directory import ( @@ -422,6 +423,14 @@ def updateRecords(self, records, create=False): ) + def setAutoScheduleMode(self, record, autoScheduleMode): + return self._sendCommand( + SetAutoScheduleModeCommand, + uid=record.uid.encode("utf-8"), + autoScheduleMode=autoScheduleMode.name, + ) + + @inlineCallbacks def flush(self): try: diff --git a/txdav/dps/commands.py b/txdav/dps/commands.py index 4a9442dad..f951bf850 100644 --- a/txdav/dps/commands.py +++ b/txdav/dps/commands.py @@ -265,6 +265,15 @@ class ExternalDelegatesCommand(amp.Command): ] +class SetAutoScheduleModeCommand(amp.Command): + arguments = [ + ('uid', amp.String()), + ('autoScheduleMode', amp.String()), + ] + response = [ + ('success', amp.Boolean()), + ] + class FlushCommand(amp.Command): arguments = [] diff --git a/txdav/dps/server.py b/txdav/dps/server.py index c6dda04f4..ead70011e 100644 --- a/txdav/dps/server.py +++ b/txdav/dps/server.py @@ -45,8 +45,10 @@ WikiAccessForUIDCommand, ContinuationCommand, ExternalDelegatesCommand, StatsCommand, ExpandedMemberUIDsCommand, AddMembersCommand, RemoveMembersCommand, - UpdateRecordsCommand, FlushCommand, # RemoveRecordsCommand, + UpdateRecordsCommand, FlushCommand, SetAutoScheduleModeCommand, + # RemoveRecordsCommand, ) +from txdav.who.idirectory import AutoScheduleMode from txdav.who.wiki import WikiAccessLevel from zope.interface import implementer @@ -540,6 +542,21 @@ def updateRecords(self, uids, create): returnValue(response) + @SetAutoScheduleModeCommand.responder + @inlineCallbacks + def setAutoScheduleMode(self, uid, autoScheduleMode): + uid = uid.decode("utf-8") + record = yield self._directory.recordWithUID(uid) + autoScheduleMode = autoScheduleMode.decode("utf-8") + autoScheduleMode = AutoScheduleMode.lookupByName(autoScheduleMode) + yield self._directory.setAutoScheduleMode(record, autoScheduleMode) + response = { + "success": True + } + returnValue(response) + + + @GroupsCommand.responder @inlineCallbacks def groups(self, uid): @@ -851,8 +868,12 @@ def makeService(self, options): config.Manhole.DPSPortNumber ) manholeService = manholeMakeService({ - "sshPort": None, - "telnetPort": portString, + "sshPort": portString if config.Manhole.UseSSH is True else None, + "telnetPort": portString if config.Manhole.UseSSH is False else None, + "sshKeyDir": config.DataRoot, + "sshKeyName": "manhole.key", + "sshKeySize": 4096, + "passwd": config.Manhole.PasswordFilePath, "namespace": { "config": config, "service": dpsService, diff --git a/txdav/dps/test/test_client.py b/txdav/dps/test/test_client.py index b15785cbc..3fbed8f02 100644 --- a/txdav/dps/test/test_client.py +++ b/txdav/dps/test/test_client.py @@ -36,6 +36,7 @@ from txdav.who.test.support import ( TestRecord, CalendarInMemoryDirectoryService ) +from txdav.who.idirectory import 
AutoScheduleMode testMode = "xml" # "xml" or "od" @@ -441,6 +442,22 @@ def configure(self): self.patch(config.Authentication.Wiki, "Enabled", True) + @inlineCallbacks + def test_setAutoScheduleMode(self): + """ + Verify setAutoSchedule works across DPS + """ + record = yield self.client.recordWithUID(u"75EA36BE-F71B-40F9-81F9-CF59BF40CA8F") + # Defaults to automatic + self.assertEquals(record.autoScheduleMode, AutoScheduleMode.acceptIfFreeDeclineIfBusy) + # Change it to accept-if-busy + yield record.setAutoScheduleMode(AutoScheduleMode.acceptIfFree) + # Refetch it + record = yield self.client.recordWithUID(u"75EA36BE-F71B-40F9-81F9-CF59BF40CA8F") + # Verify it's changed + self.assertEquals(record.autoScheduleMode, AutoScheduleMode.acceptIfFree) + + @inlineCallbacks def test_uid(self): record = (yield self.client.recordWithUID(self.wsanchezUID)) diff --git a/txdav/who/augment.py b/txdav/who/augment.py index c15a6de85..c7b96cbeb 100644 --- a/txdav/who/augment.py +++ b/txdav/who/augment.py @@ -471,6 +471,26 @@ def _augment(self, record): returnValue(augmentedRecord) + @inlineCallbacks + def setAutoScheduleMode(self, record, autoScheduleMode): + augmentRecord = yield self._augmentDB.getAugmentRecord( + record.uid, + self.recordTypeToOldName(record.recordType) + ) + if augmentRecord is not None: + autoScheduleMode = { + AutoScheduleMode.none: "none", + AutoScheduleMode.accept: "accept-always", + AutoScheduleMode.decline: "decline-always", + AutoScheduleMode.acceptIfFree: "accept-if-free", + AutoScheduleMode.declineIfBusy: "decline-if-busy", + AutoScheduleMode.acceptIfFreeDeclineIfBusy: "automatic", + }.get(autoScheduleMode) + + augmentRecord.autoScheduleMode = autoScheduleMode + yield self._augmentDB.addAugmentRecords([augmentRecord]) + + class AugmentedDirectoryRecord(DirectoryRecord, CalendarDirectoryRecordMixin): """ diff --git a/txdav/who/directory.py b/txdav/who/directory.py index bb96aa454..5fc173ad9 100644 --- a/txdav/who/directory.py +++ b/txdav/who/directory.py @@ -607,6 +607,10 @@ def getAutoScheduleMode(self, organizer): returnValue(autoScheduleMode) + def setAutoScheduleMode(self, autoScheduleMode): + return self.service.setAutoScheduleMode(self, autoScheduleMode) + + @inlineCallbacks def autoAcceptFromOrganizer(self, organizer): try: diff --git a/txdav/who/test/test_directory.py b/txdav/who/test/test_directory.py index a47158b9a..d6fc97d12 100644 --- a/txdav/who/test/test_directory.py +++ b/txdav/who/test/test_directory.py @@ -358,6 +358,21 @@ def test_getAutoScheduleMode(self): ) + @inlineCallbacks + def test_setAutoScheduleMode(self): + """ + Verify the record.setAutoScheduleMode( ) method + """ + orion = yield self.directory.recordWithUID(u"orion") + # Defaults to automatic + self.assertEquals(orion.autoScheduleMode, AutoScheduleMode.acceptIfFreeDeclineIfBusy) + # Change it to decline-if-busy + yield orion.setAutoScheduleMode(AutoScheduleMode.declineIfBusy) + # Refetch it + orion = yield self.directory.recordWithUID(u"orion") + # Verify it's changed + self.assertEquals(orion.autoScheduleMode, AutoScheduleMode.declineIfBusy) + class DirectoryTestCaseFakeEmail(StoreTestCase):
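Taken together, the tests above exercise the new end-to-end path: record.setAutoScheduleMode() goes through the directory service (directly, or over DPS via SetAutoScheduleModeCommand, with the augmented directory persisting the mode to the augments database), and the new mode is visible on a re-fetched record. A condensed usage sketch, reusing the "orion" fixture UID from the tests:

from twisted.internet.defer import inlineCallbacks
from txdav.who.idirectory import AutoScheduleMode

@inlineCallbacks
def setOrionToDeclineIfBusy(directory):
    record = yield directory.recordWithUID(u"orion")
    yield record.setAutoScheduleMode(AutoScheduleMode.declineIfBusy)
    # Re-fetch to confirm the change was persisted by the directory service.
    record = yield directory.recordWithUID(u"orion")
    assert record.autoScheduleMode == AutoScheduleMode.declineIfBusy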