Fixes #15676: Remove Map#keySet because of memory not freed #2453

Closed
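The change is mechanical but repeated across many files: every place that returned or cached `Map#keySet` now materialises the keys with `keysIterator.toSet`. The rationale, per the PR title, is that `keySet` on standard Scala maps returns a thin wrapper backed by the map itself, so holding onto it keeps the whole map, values included, reachable; copying the keys into a fresh Set breaks that link. A minimal sketch of the difference, with illustrative names that are not part of the PR:

object KeySetRetentionSketch {
  final case class Blob(payload: Array[Byte]) // stands in for a heavy value

  // Before: the returned Set is a view over `m`, so a caller that caches it
  // keeps every Blob in the map reachable for as long as the Set lives.
  def keysSharing(m: Map[String, Blob]): Set[String] = m.keySet

  // After: copy only the keys into an independent Set; once the caller drops
  // its own reference to `m`, the values can be garbage collected.
  def keysCopied(m: Map[String, Blob]): Set[String] = m.keysIterator.toSet
}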
@@ -258,7 +258,7 @@ class GitTechniqueReader(
if(versionsMods.values.forall( _ == VersionDeleted)
&& currentTechniquesInfoCache.techniques.get(name).map( _.size) == Some(versionsMods.size)
) {
(name, TechniqueDeleted(name, versionsMods.keySet))
(name, TechniqueDeleted(name, versionsMods.keysIterator.toSet))
} else {
(name, TechniqueUpdated(name, versionsMods))
}
@@ -224,7 +224,7 @@ class TechniqueRepositoryImpl(
override def getTechniquesInfo() = techniqueInfosCache

override def getTechniqueVersions(name: TechniqueName): SortedSet[TechniqueVersion] = {
SortedSet[TechniqueVersion]() ++ techniqueInfosCache.techniques.get(name).toSeq.flatMap(_.keySet)
SortedSet[TechniqueVersion]() ++ techniqueInfosCache.techniques.get(name).toSeq.flatMap(_.keysIterator)
}

override def getByName(name:TechniqueName) : Map[TechniqueVersion, Technique] = {
@@ -234,7 +234,7 @@ object RuleTarget extends Loggable {
serverRoles.size>0 || isPolicyServer || nodeId == Constants.ROOT_POLICY_SERVER_ID
}
(Set[NodeId]() /: targets) { case (nodes , target) => target match {
case AllTarget => return allNodes.keySet
case AllTarget => return allNodes.keysIterator.toSet
case AllTargetExceptPolicyServers => nodes ++ allNodes.collect { case(k,n) if(!n._1) => k }
case PolicyServerTarget(nodeId) => nodes + nodeId
case AllServersWithRole =>
@@ -258,7 +258,7 @@ object RuleTarget extends Loggable {
case TargetIntersection(targets) =>
val nodeSets = targets.map(t => getNodeIds(Set(t), allNodes, groups))
// Compute the intersection of the sets of Nodes
val intersection = (allNodes.keySet/: nodeSets) {
val intersection = (allNodes.keysIterator.toSet /: nodeSets) {
case (currentIntersection, nodes) => currentIntersection.intersect(nodes)
}
nodes ++ intersection
@@ -119,7 +119,7 @@ class CachedReportsExecutionRepository(

override def getNodesLastRun(nodeIds: Set[NodeId]): Box[Map[NodeId, Option[AgentRunWithNodeConfig]]] = this.synchronized {
(for {
runs <- readBackend.getNodesLastRun(nodeIds.diff(cache.keySet))
runs <- readBackend.getNodesLastRun(nodeIds.diff(cache.keysIterator.toSet))
} yield {
cache = cache ++ runs
cache.filterKeys { x => nodeIds.contains(x) }
@@ -524,7 +524,7 @@ class UpdateExpectedReportsJdbcRepository(

Update[(String, String)]("""
update nodes_info set config_ids = ? where node_id = ?
""").updateMany(params).map(_ => configInfos.keySet)
""").updateMany(params).map(_ => configInfos.keysIterator.toSet)
}
}

@@ -122,7 +122,7 @@ class HistorizationServiceImpl(

// a node closable is a node that is current in the database, but don't exist in the
// ldap
val closable = registered.keySet.filter(x => !(nodeInfos.map(node => node.id.value)).contains(x))
val closable = registered.keysIterator.toSet.filter(x => !(nodeInfos.map(node => node.id.value)).contains(x))
historizationRepository.updateNodes(changed, closable.toSeq)
}) ?~! "Could not update the nodes historization information in base."

@@ -150,7 +150,7 @@ class HistorizationServiceImpl(

// a group closable is a group that is current in the database, but don't exist in the
// ldap
val closable = registered.keySet.filter(x => !(nodeGroups.map( _.nodeGroup.id.value)).toSet.contains(x))
val closable = registered.keysIterator.toSet.filter(x => !(nodeGroups.map( _.nodeGroup.id.value)).toSet.contains(x))

historizationRepository.updateGroups(changed, closable.toSeq)
}) ?~! "Could not update the groups historization information in base."
@@ -188,9 +188,9 @@ class HistorizationServiceImpl(
}
}.toSeq.map { case (t,fat,d) => (d, fat.toActiveTechnique, t) }

val stringDirectiveIds = directives.keySet.map( _.value)
val stringDirectiveIds = directives.keysIterator.toSet.map( _.value)
Reviewer comment (Member):
wow, this fails
[ERROR] /root/dev/rudder/webapp/sources/rudder/rudder-core/src/main/scala/com/normation/rudder/services/eventlog/HistorizationService.scala:191: error: missing parameter type for expanded function ((x$4: ) => x$4.value)
[ERROR] val stringDirectiveIds = directives.keysIterator.toSet.map( _.value)
[ERROR] ^

i have no idea why ...

Reviewer comment (Member):
doing it in 2 steps fixes the issues
val directiveSet = directives.keysIterator.toSet
val stringDirectiveIds = directiveSet.map( _.value)
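For context, the failure quoted above looks like the long-standing Scala `toSet` inference quirk rather than anything Rudder-specific: `toSet` is polymorphic (`def toSet[B >: A]: Set[B]`), so in the one-liner the element type is still undetermined when the compiler types the lambda passed to `map`, hence "missing parameter type". Binding the Set first pins the element type, which is exactly the two-step workaround shown. A self-contained sketch with illustrative names:

object ToSetInferenceSketch {
  final case class Id(value: String)

  val directives: Map[Id, String] = Map(Id("a") -> "x", Id("b") -> "y")

  // Does not compile on Scala 2.12: "missing parameter type for expanded function"
  // val ids = directives.keysIterator.toSet.map(_.value)

  // Two steps fix the Set's element type before `map` is typed.
  val directiveSet: Set[Id] = directives.keysIterator.toSet
  val ids: Set[String]      = directiveSet.map(_.value)
}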


val closable = registered.keySet.filter(x => !stringDirectiveIds.contains(x))
val closable = registered.keysIterator.toSet.filter(x => !stringDirectiveIds.contains(x))

historizationRepository.updateDirectives(changed, closable.toSeq)
}) ?~! s"Could not update the directives historization information in base."
@@ -208,7 +208,7 @@ class HistorizationServiceImpl(
})

// a closable rule is a rule that is in the database, but not in the ldap
val closable = registered.keySet.filter(x => !(rules.map(rule => rule.id)).contains(x)).
val closable = registered.keysIterator.toSet.filter(x => !(rules.map(rule => rule.id)).contains(x)).
map(x => x.value)
historizationRepository.updateRules(changed, closable.toSeq)
}) ?~! s"Could not update the rules historization information in base."
@@ -369,7 +369,7 @@ trait NodeInfoServiceCached extends NodeInfoService with Loggable with CachedRep
case Full(newCache) =>
logger.debug(s"NodeInfo cache is not up to date, last modification time: '${newCache.lastModTime}', last cache update:"+
s" '${lastUpdate}' => reseting cache with ${newCache.nodeInfos.size} entries")
logger.trace(s"NodeInfo cache updated entries: [${newCache.nodeInfos.keySet.map{ _.value }.mkString(", ")}]")
logger.trace(s"NodeInfo cache updated entries: [${newCache.nodeInfos.keysIterator.map{ _.value }.mkString(", ")}]")
nodeCache = Some(newCache)
Full(newCache.nodeInfos)
case eb: EmptyBox =>
@@ -415,7 +415,7 @@ trait PromiseGenerationService {
///// so now we have everything for each updated nodes, we can start writing node policies and then expected reports

// WHY DO WE NEED TO FORGET OTHER NODES CACHE INFO HERE ?
_ <- forgetOtherNodeConfigurationState(nodeConfigs.keySet) ?~! "Cannot clean the configuration cache"
_ <- forgetOtherNodeConfigurationState(nodeConfigs.keysIterator.toSet) ?~! "Cannot clean the configuration cache"

writeTime = System.currentTimeMillis
writtenNodeConfigs <- writeNodeConfigurations(rootNodeId, updatedNodeConfigIds, nodeConfigs, allLicenses, globalPolicyMode, generationTime, parallelism) ?~!"Cannot write nodes configuration"
@@ -429,9 +429,9 @@

// finally, run post-generation hooks. They can lead to an error message for build, but node policies are updated
postHooksTime = System.currentTimeMillis
updatedNodes = updatedNodeConfigs.keySet.toSeq.toSet // prevent from keeping an undue reference after generation
updatedNodes = updatedNodeConfigs.keysIterator.toSet // prevent from keeping an undue reference after generation
// Doing Set[NodeId]() ++ updatedNodeConfigs.keySet didn't allow to free the objects
errorNodes = activeNodeIds -- nodeConfigs.keySet
errorNodes = activeNodeIds -- nodeConfigs.keysIterator
_ <- runPostHooks(generationTime, new DateTime(postHooksTime), updatedNodeConfigs, systemEnv, UPDATED_NODE_IDS_PATH)
timeRunPostGenHooks = (System.currentTimeMillis - postHooksTime)
_ = PolicyLogger.debug(s"Post-policy-generation hooks ran in ${timeRunPostGenHooks} ms")
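One pair in the hunk above deserves a note: the removed line already made a defensive copy (`keySet.toSeq.toSet`), and the inline comment records that `Set[NodeId]() ++ updatedNodeConfigs.keySet` had been tried without the objects being freed; the new `keysIterator.toSet` produces the same independent Set with one fewer intermediate collection. A small sketch of the equivalence, on an illustrative map:

val configs: Map[Int, String] = Map(1 -> "a", 2 -> "b")
val viaSeq  = configs.keySet.toSeq.toSet  // copies the keys via an intermediate Seq
val viaIter = configs.keysIterator.toSet  // same independent Set, no intermediate Seq
assert(viaSeq == viaIter)                 // both are Set(1, 2)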
@@ -1071,7 +1071,7 @@ object BuildNodeConfiguration extends Loggable {

val success = nodeConfigs.collect { case Right(c) => c }.toList
val failures = nodeConfigs.collect { case Left(f) => f }.toSet
val failedIds = nodeContexts.keySet -- success.map( _.nodeInfo.id )
val failedIds = nodeContexts.keysIterator.toSet -- success.map( _.nodeInfo.id )

val result = recFailNodes(failedIds, success, failures)
failures.size match {
@@ -1098,7 +1098,7 @@
if(newFailed.isEmpty) { //ok, returns
NodeConfigurations(maybeSuccess, failures.toList.map(Failure(_)))
} else { // recurse
val allFailed = failed ++ newFailed.keySet
val allFailed = failed ++ newFailed.keysIterator
recFailNodes(allFailed, maybeSuccess.filter(cfg => !allFailed.contains(cfg.nodeInfo.id)), failures ++ newFailed.values)
}
}
@@ -1120,7 +1120,7 @@ trait PromiseGeneration_updateAndWriteRule extends PromiseGenerationService {
* Return the updated map of all node configurations (really present).
*/
def purgeDeletedNodes(allNodes: Set[NodeId], allNodeConfigs: Map[NodeId, NodeConfiguration]) : Box[Map[NodeId, NodeConfiguration]] = {
val nodesToDelete = allNodeConfigs.keySet -- allNodes
val nodesToDelete = allNodeConfigs.keysIterator.toSet -- allNodes
for {
deleted <- nodeConfigurationService.deleteNodeConfigurations(nodesToDelete)
} yield {
@@ -1158,7 +1158,7 @@ trait PromiseGeneration_updateAndWriteRule extends PromiseGenerationService {
} else {
val nodeToKeep = updatedConfig.map( _.id ).toSet
PolicyLogger.info(s"Configuration of following ${updatedConfig.size} nodes were updated, their promises are going to be written: [${updatedConfig.map(_.id.value).mkString(", ")}]")
nodeConfigurations.keySet.intersect(nodeToKeep)
nodeConfigurations.keysIterator.toSet.intersect(nodeToKeep)
}
}

@@ -1229,7 +1229,7 @@ trait PromiseGeneration_updateAndWriteRule extends PromiseGenerationService {
val fsWrite0 = System.currentTimeMillis

for {
written <- promisesFileWriterService.writeTemplate(rootNodeId, updated.keySet, allNodeConfigs, updated, allLicenses, globalPolicyMode, generationTime, maxParallelism)
written <- promisesFileWriterService.writeTemplate(rootNodeId, updated.keysIterator.toSet, allNodeConfigs, updated, allLicenses, globalPolicyMode, generationTime, maxParallelism)
ldapWrite0 = DateTime.now.getMillis
fsWrite1 = (ldapWrite0 - fsWrite0)
_ = PolicyLogger.debug(s"Node configuration written on filesystem in ${fsWrite1} ms")
@@ -173,7 +173,7 @@ class RuleValServiceImpl(

def getTargetedNodes(rule: Rule, groupLib: FullNodeGroupCategory, allNodeInfos: Map[NodeId, NodeInfo]): Set[NodeId] = {
val wantedNodeIds = groupLib.getNodeIds(rule.targets, allNodeInfos)
val nodeIds = wantedNodeIds.intersect(allNodeInfos.keySet)
val nodeIds = wantedNodeIds.intersect(allNodeInfos.keysIterator.toSet)
if(nodeIds.size != wantedNodeIds.size) {
logger.error(s"Some nodes are in the target of rule '${rule.name}' (${rule.id.value}) but are not present " +
s"in the system. It looks like an inconsistency error. Ignored nodes: ${(wantedNodeIds -- nodeIds).map( _.value).mkString(", ")}")
@@ -237,7 +237,7 @@ class TechniqueAcceptationUpdater(

case (TechniqueUpdated(name, mods), Some(activeTechnique)) =>
logger.debug("Update acceptation datetime for: " + activeTechnique.techniqueName)
val versionsMap = mods.keySet.map( v => (v, acceptationDatetime)).toMap
val versionsMap = mods.keysIterator.map( v => (v, acceptationDatetime)).toMap
rwActiveTechniqueRepo.setAcceptationDatetimes(activeTechnique.id, versionsMap, modId, actor, reason) match {
case e: EmptyBox =>
e ?~! s"Error when saving Active Technique ${activeTechnique.id.value} for technque ${activeTechnique.techniqueName}"
@@ -283,18 +283,18 @@ class InMemoryNodeConfigurationHashRepository extends NodeConfigurationHashRepos
* delete all node configuration
*/
def deleteAllNodeConfigurations() : Box[Unit] = {
val values = repository.keySet
val values = repository.keysIterator.toSet
repository.clear

Full(values.toSet)
Full(values)
}

/**
* Inverse of delete: delete all node configuration not
* given in the argument.
*/
def onlyKeepNodeConfiguration(nodeIds:Set[NodeId]) : Box[Set[NodeId]] = {
val remove = repository.keySet -- nodeIds
val remove = repository.keysIterator.toSet -- nodeIds
repository --= remove
Full(nodeIds)
}
@@ -304,7 +304,7 @@
def save(NodeConfigurationHash: Set[NodeConfigurationHash]): Box[Set[NodeId]] = {
val toAdd = NodeConfigurationHash.map(c => (c.id, c)).toMap
repository ++= toAdd
Full(toAdd.keySet)
Full(toAdd.keysIterator.toSet)
}
}

@@ -121,7 +121,7 @@ trait DefaultStringQueryParser extends StringQueryParser {

val objectType = criterionObjects.getOrElse(line.objectType,
return Failure(s"The object type '${line.objectType}' is unknown in line 'line'. Possible object types: [${
criterionObjects.keySet.toList.sorted.mkString(",")}] ".format(line))
criterionObjects.keysIterator.toList.sorted.mkString(",")}] ".format(line))
)

val criterion = objectType.criterionForName(line.attribute).getOrElse {
@@ -289,7 +289,7 @@ object QSLdapBackend {

if(m.size != QSAttribute.all.size) {
throw new IllegalArgumentException("Be carefull, it seems that the list of attributes in QSAttribute was modified, but not the list of name mapping" +
s"Please check for '${(m.keySet.diff(QSAttribute.all)++QSAttribute.all.diff(m.keySet)).mkString("', '")}'"
s"Please check for '${(m.keysIterator.toSet.diff(QSAttribute.all)++QSAttribute.all.diff(m.keysIterator)).mkString("', '")}'"
Reviewer comment (Member):
missing a toSet here

)
}
m
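To make the "missing a toSet here" remark above concrete: `diff` on a Set expects another set, so a bare `keysIterator` does not type-check there, while `keysIterator.toSet` does (assuming `QSAttribute.all` is a Set, as the surrounding code suggests). A tiny sketch with illustrative values:

val all: Set[Int]       = Set(1, 2, 3)
val m: Map[Int, String] = Map(1 -> "a")
// all.diff(m.keysIterator)                    // does not compile: diff expects a set, not an Iterator
val onlyInAll = all.diff(m.keysIterator.toSet) // Set(2, 3)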
@@ -833,8 +833,8 @@ object ExecutionBatch extends Loggable {
* - reports without expected component => unknown
* - both expected component and reports => check
*/
val reportKeys = reports.keySet
val expectedKeys = expectedComponents.keySet
val reportKeys = reports.keysIterator.toSet
val expectedKeys = expectedComponents.keysIterator.toSet
val okKeys = reportKeys.intersect(expectedKeys)

val missing = expectedComponents.filterKeys(k => !reportKeys.contains(k)).map { case ((d,_), (pm,mrs,c)) =>
@@ -241,7 +241,7 @@ class CachedNodeChangesServiceImpl(
changes.getOrElse(ruleId, Map()).getOrElse(i, 0)
}

(changes1.keySet ++ changes2.keySet).map { ruleId =>
(changes1.keysIterator.toSet ++ changes2.keysIterator).map { ruleId =>
(ruleId,
on.map { i =>
val updated = get(changes1, ruleId, i) + get(changes2, ruleId, i)
@@ -120,7 +120,7 @@ trait RuleOrNodeReportingServiceImpl extends ReportingService {

for {
systemDirectiveIds <- directivesRepo.getFullDirectiveLibrary().map( _.allDirectives.values.collect{ case(at, d) if(at.isSystem) => d.id }.toSet)
nodeIds <- nodeInfoService.getAll().map( _.keySet )
nodeIds <- nodeInfoService.getAll().map( _.keysIterator.toSet )
reports <- findRuleNodeStatusReports(nodeIds, Set())
} yield {

@@ -221,7 +221,7 @@ trait CachedFindRuleNodeStatusReports extends ReportingService with CachedReposi

for {
// disabled nodes are ignored
allNodeIds <- nodeInfoService.getAll.map( _.filter { case(_,n) => n.state != NodeState.Ignored }.keySet )
allNodeIds <- nodeInfoService.getAll.map( _.filter { case(_,n) => n.state != NodeState.Ignored }.keysIterator.toSet )
//only try to update nodes that are accepted in Rudder
nodeIds = nodeIdsToCheck.intersect(allNodeIds)
/*
@@ -255,7 +255,7 @@ trait CachedFindRuleNodeStatusReports extends ReportingService with CachedReposi
newStatus <- defaultFindRuleNodeStatusReports.findRuleNodeStatusReports(expired, Set())
} yield {
//here, newStatus.keySet == expired.keySet, so we have processed all nodeIds that should be modified.
logger.debug(s"Compliance cache miss (updated):[${newStatus.keySet.map(_.value).mkString(" , ")}], "+
logger.debug(s"Compliance cache miss (updated):[${newStatus.keysIterator.map(_.value).mkString(" , ")}], "+
s" hit:[${upToDate.map(_.value).mkString(" , ")}]")
cache = cache ++ newStatus
val toReturn = cache.filterKeys { id => nodeIds.contains(id) }
@@ -291,7 +291,7 @@ trait CachedFindRuleNodeStatusReports extends ReportingService with CachedReposi
for {
infos <- nodeInfoService.getAll
} yield {
checkAndUpdateCache(infos.keySet)
checkAndUpdateCache(infos.keysIterator.toSet)
}
}
()
@@ -75,12 +75,12 @@ sealed trait NodeDetailLevel {
* Does any of the listed fields need to be looked-up
* with full inventory ?
*/
final def needFullInventory() = NodeDetailLevel.fullInventoryFields.keySet.intersect(fields).nonEmpty
final def needFullInventory() = NodeDetailLevel.fullInventoryFields.keysIterator.toSet.intersect(fields).nonEmpty

/**
* Does any of the listed fields need software look-up?
*/
final def needSoftware() = NodeDetailLevel.softwareFields.keySet.intersect(fields).nonEmpty
final def needSoftware() = NodeDetailLevel.softwareFields.keysIterator.toSet.intersect(fields).nonEmpty
}

case object MinimalDetailLevel extends NodeDetailLevel {
@@ -243,7 +243,7 @@ class ComplianceAPIService(
nodeInfos <- nodeInfoService.getAll()
compliance <- getGlobalComplianceMode()
reportsByNode <- reportingService.findRuleNodeStatusReports(
nodeInfos.keySet, rules.map(_.id).toSet
nodeInfos.keysIterator.toSet, rules.map(_.id).toSet
)
} yield {

@@ -354,7 +354,7 @@ class ComplianceAPIService(
}
compliance <- getGlobalComplianceMode()
reports <- reportingService.findRuleNodeStatusReports(
nodeInfos.keySet, rules.map(_.id).toSet
nodeInfos.keysIterator.toSet, rules.map(_.id).toSet
)
} yield {

@@ -556,15 +556,15 @@ class NodeApiService6 (
case PendingInventory => nodeInfoService.getPendingNodeInfos()
case RemovedInventory => nodeInfoService.getDeletedNodeInfos()
}
nodeIds = nodeFilter.getOrElse(nodeInfos.keySet).toSet
nodeIds = nodeFilter.getOrElse(nodeInfos.keysIterator).toSet
runs <- roAgentRunsRepository.getNodesLastRun(nodeIds)
inventories <- if(detailLevel.needFullInventory()) {
inventoryRepository.getAllInventories(state)
} else {
Full(Map[NodeId, FullInventory]())
}
software <- if(detailLevel.needSoftware()) {
softwareRepository.getSoftwareByNode(nodeInfos.keySet, state)
softwareRepository.getSoftwareByNode(nodeInfos.keysIterator.toSet, state)
} else {
Full(Map[NodeId, Seq[Software]]())
}