From 081fdf0e3cb1d710da584df5fb440dd1842d1dcc Mon Sep 17 00:00:00 2001
From: Marina Sahakyan
Date: Thu, 17 Nov 2022 14:57:39 +0100
Subject: [PATCH] poolmanager: cache the pool selection preferences key for request
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Motivation

When a request comes in, there is a high probability that the next pool
selection request will have the same selection parameters, for example for
the storage and net units. It is therefore reasonable to store these input
criteria as a key and, if the next request maps to the same key, simply
return the matching pools from the cache.

Modification

Add a simple cache that solves this. Add a property allowing the admin to
choose whether they want to use caching or not.

Result

Based on benchmarking, the performance without caching is 6255.168 ops/s,
with caching 31811.381 ops/s.

test results

without caching:

Result "org.dcache.util.PoolSelectionUnitBenchmarkCaching.matchCaching":
  822925.703 ±(99.9%) 90778.999 ops/s [Average]
  (min, avg, max) = (595144.863, 822925.703, 950679.211), stdev = 121187.367
  CI (99.9%): [732146.704, 913704.703] (assumes normal distribution)

REMEMBER: The numbers below are just data. To gain reusable insights, you need to follow up on
why the numbers are the way they are. Use profilers (see -prof, -lprof), design factorial
experiments, perform baseline and negative tests that provide experimental control, make sure
the benchmarking environment is safe on JVM/OS/HW level, ask for reviews from the domain experts.
Do not assume the numbers tell you what you want them to tell.

Benchmark                                        Mode  Cnt       Score       Error  Units
PoolSelectionUnitBenchmarkCaching.matchCaching  thrpt   25  822925.703 ± 90778.999  ops/s

Process finished with exit code 0

with caching:

Result "org.dcache.util.PoolSelectionUnitBenchmarkCaching.matchCaching":
  2742824.216 ±(99.9%) 292446.706 ops/s [Average]
  (min, avg, max) = (2072069.158, 2742824.216, 3203079.348), stdev = 390407.985
  CI (99.9%): [2450377.510, 3035270.922] (assumes normal distribution)

REMEMBER: The numbers below are just data. To gain reusable insights, you need to follow up on
why the numbers are the way they are. Use profilers (see -prof, -lprof), design factorial
experiments, perform baseline and negative tests that provide experimental control, make sure
the benchmarking environment is safe on JVM/OS/HW level, ask for reviews from the domain experts.
Do not assume the numbers tell you what you want them to tell.

Benchmark                                        Mode  Cnt        Score        Error  Units
PoolSelectionUnitBenchmarkCaching.matchCaching  thrpt   25  2742824.216 ± 292446.706  ops/s

Process finished with exit code 0
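For reviewers, the caching approach can be summarised by the following minimal sketch.
It is illustrative only (the class and method names SelectionCacheSketch, key and select
do not appear in the patch), but it uses the same Guava Cache/CacheBuilder machinery and
the same maximum size as the patch, and it shows where the cache has to be invalidated:

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.util.function.Function;

    // Minimal sketch of the idea: fold the selection parameters into a string key and
    // serve repeated requests with the same parameters from a bounded in-memory cache.
    public class SelectionCacheSketch {

        private final Cache<String, String[]> cache = CacheBuilder.newBuilder()
              .maximumSize(100000)   // same bound as the cache added by this patch
              .build();

        // Compose a key from the selection parameters (direction, store unit, net unit group, ...).
        private String key(String direction, String storeUnit, String netGroup, String protocol) {
            return direction + "|" + storeUnit + "|" + netGroup + "|" + protocol;
        }

        // Return the cached selection if present, otherwise run the expensive match and remember it.
        public String[] select(String direction, String storeUnit, String netGroup, String protocol,
              Function<String, String[]> expensiveMatch) {
            String k = key(direction, storeUnit, netGroup, protocol);
            String[] cached = cache.getIfPresent(k);
            if (cached != null) {
                return cached;
            }
            String[] result = expensiveMatch.apply(k);
            cache.put(k, result);
            return result;
        }

        // Any change to the PSU configuration must drop all cached selections.
        public void onConfigurationChange() {
            cache.invalidateAll();
        }
    }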
Target: master
Patch: https://rb.dcache.org/r/
Acked-by:
Requires-notes: no
---
 .../PoolSelectionUnitBenchmarkCaching.java    | 371 ++++++++++++++++++
 .../poolManager/PoolSelectionUnitV2.java      |  54 ++-
 .../diskCacheV111/poolManager/poolmanager.xml |   1 +
 .../poolmanager/PoolSelectionUnitTest.java    |  29 ++
 skel/share/defaults/poolmanager.properties    |   8 +
 5 files changed, 461 insertions(+), 2 deletions(-)
 create mode 100644 modules/benchmarks/src/main/java/org/dcache/util/PoolSelectionUnitBenchmarkCaching.java

diff --git a/modules/benchmarks/src/main/java/org/dcache/util/PoolSelectionUnitBenchmarkCaching.java b/modules/benchmarks/src/main/java/org/dcache/util/PoolSelectionUnitBenchmarkCaching.java
new file mode 100644
index 00000000000..92a0ea307ab
--- /dev/null
+++ b/modules/benchmarks/src/main/java/org/dcache/util/PoolSelectionUnitBenchmarkCaching.java
@@ -0,0 +1,371 @@
+/*
+ * dCache - http://www.dcache.org/
+ *
+ * Copyright (C) 2023 Deutsches Elektronen-Synchrotron
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU Affero General Public License as
+ * published by the Free Software Foundation, either version 3 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU Affero General Public License for more details.
+ *
+ * You should have received a copy of the GNU Affero General Public License
+ * along with this program.  If not, see <http://www.gnu.org/licenses/>.
+ */ +package org.dcache.util; + +import diskCacheV111.poolManager.PoolPreferenceLevel; +import diskCacheV111.poolManager.PoolSelectionUnit.DirectionType; +import diskCacheV111.poolManager.PoolSelectionUnitV2; +import diskCacheV111.pools.PoolV2Mode; +import diskCacheV111.vehicles.GenericStorageInfo; +import diskCacheV111.vehicles.StorageInfos; +import dmg.util.CommandException; +import dmg.util.CommandInterpreter; +import java.util.concurrent.ThreadLocalRandom; +import java.util.function.Predicate; +import org.dcache.vehicles.FileAttributes; +import org.openjdk.jmh.annotations.Benchmark; +import org.openjdk.jmh.annotations.BenchmarkMode; +import org.openjdk.jmh.annotations.Mode; +import org.openjdk.jmh.annotations.Scope; +import org.openjdk.jmh.annotations.Setup; +import org.openjdk.jmh.annotations.State; +import org.openjdk.jmh.annotations.Threads; +import org.openjdk.jmh.runner.Runner; +import org.openjdk.jmh.runner.RunnerException; +import org.openjdk.jmh.runner.options.Options; +import org.openjdk.jmh.runner.options.OptionsBuilder; + +@State(Scope.Thread) +@BenchmarkMode(Mode.Throughput) +public class PoolSelectionUnitBenchmarkCaching { + + private PoolSelectionUnitV2 psu; + private final Predicate excludeNoPools = p -> false; + + private FileAttributes fileAttributes = FileAttributes.of() + .storageInfo(GenericStorageInfo.valueOf("a:b@osm", "*")) + .build(); + private String[] adresses; + + @Setup + public void setUp() throws CommandException { + + psu = new PoolSelectionUnitV2(); + var ci = new CommandInterpreter(psu); + + StorageInfos.injectInto(GenericStorageInfo.valueOf("*", "*"), fileAttributes); + + adresses = new String[]{ + + "131.169.214.101", "131.169.214.102", "131.169.214.103", "131.169.104.123", + "131.169.214.105", + "131.169.214.106", "131.169.214.107", + "131.169.214.108", "131.169.214.109", "131.169.214.110", "131.169.214.111", + "131.169.214.112", + "131.169.214.113", "131.169.214.114", + "131.169.214.115", "131.169.214.116", "131.169.214.117", "131.169.214.118", + "131.169.214.119", + "131.169.214.20", + + "141.169.214.101", "141.169.214.102", "141.169.214.103", "141.169.104.123", + "141.169.214.105", + "141.169.214.106", "141.169.214.107", + "141.169.214.108", "141.169.214.109", "141.169.214.110", "141.169.214.111", + "141.169.214.112", + "141.169.214.113", "141.169.214.114", + "141.169.214.115", "141.169.214.116", "141.169.214.117", "141.169.214.118", + "141.169.214.119", + "141.169.214.20", + + "143.169.214.101", "143.169.214.102", "143.169.214.103", "143.169.104.123", + "143.169.214.105", + "143.169.214.106", "143.169.214.107", + "143.169.214.108", "143.169.214.109", "143.169.214.110", "143.169.214.111", + "143.169.214.112", + "143.169.214.113", "143.169.214.114", + "143.169.214.115", "143.169.214.116", "143.169.214.117", "143.169.214.118", + "143.169.214.119", + "143.169.214.20", + + "231.169.214.101", "231.169.214.102", "231.169.214.103", "231.169.104.123", + "231.169.214.105", "231.169.214.106", "231.169.214.107", + "231.169.214.108", "231.169.214.109", "231.169.214.110", + "231.169.214.111", "231.169.214.112", "231.169.214.113", "231.169.214.114", + "231.169.214.115", "231.169.214.116", "231.169.214.117", + "231.169.214.118", "231.169.214.119", "231.169.214.20", + + + }; + + // storage units + + ci.command(new Args("psu create unit -store h1:u1@osm")); + ci.command(new Args("psu create unit -store h1:u2@osm")); + + ci.command(new Args("psu create unit -store zeus:u1@osm")); + ci.command(new Args("psu create unit -store zeus:u2@osm")); + + ci.command(new 
Args("psu create unit -store flc:u1@osm")); + ci.command(new Args("psu create unit -store flc:u2@osm")); + + ci.command(new Args("psu create unit -store hermes:u1@osm")); + ci.command(new Args("psu create unit -store hermes:u2@osm")); + + ci.command(new Args("psu create unit -store herab:u1@osm")); + ci.command(new Args("psu create unit -store herab:u2@osm")); + + ci.command(new Args("psu create unit -store *@*")); + + // store unit groups + + ci.command(new Args("psu create ugroup all-h1")); + ci.command(new Args("psu create ugroup all-zeus")); + ci.command(new Args("psu create ugroup all-flc")); + ci.command(new Args("psu create ugroup all-hermes")); + ci.command(new Args("psu create ugroup all-herab")); + ci.command(new Args("psu create ugroup all-hera")); + ci.command(new Args("psu create ugroup all")); + + // populate ugroups + + ci.command(new Args("psu addto ugroup all-h1 h1:u1@osm")); + ci.command(new Args("psu addto ugroup all-h1 h1:u2@osm")); + + ci.command(new Args("psu addto ugroup all-h1 zeus:u1@osm")); + ci.command(new Args("psu addto ugroup all-h1 zeus:u2@osm")); + + ci.command(new Args("psu addto ugroup all-h1 flc:u1@osm")); + ci.command(new Args("psu addto ugroup all-h1 flc:u2@osm")); + + ci.command(new Args("psu addto ugroup all-h1 hermes:u1@osm")); + ci.command(new Args("psu addto ugroup all-h1 hermes:u2@osm")); + + ci.command(new Args("psu addto ugroup all-h1 herab:u1@osm")); + ci.command(new Args("psu addto ugroup all-h1 herab:u2@osm")); + + ci.command(new Args("psu addto ugroup all h1:u1@osm")); + ci.command(new Args("psu addto ugroup all h1:u2@osm")); + ci.command(new Args("psu addto ugroup all zeus:u1@osm")); + ci.command(new Args("psu addto ugroup all zeus:u2@osm")); + ci.command(new Args("psu addto ugroup all flc:u1@osm")); + ci.command(new Args("psu addto ugroup all flc:u2@osm")); + ci.command(new Args("psu addto ugroup all hermes:u1@osm")); + ci.command(new Args("psu addto ugroup all hermes:u2@osm")); + ci.command(new Args("psu addto ugroup all herab:u1@osm")); + ci.command(new Args("psu addto ugroup all herab:u2@osm")); + ci.command(new Args("psu addto ugroup all *@*")); + + ci.command(new Args("psu addto ugroup all-hera h1:u1@osm")); + ci.command(new Args("psu addto ugroup all-hera h1:u2@osm")); + ci.command(new Args("psu addto ugroup all-hera zeus:u1@osm")); + ci.command(new Args("psu addto ugroup all-hera zeus:u2@osm")); + ci.command(new Args("psu addto ugroup all-hera hermes:u1@osm")); + ci.command(new Args("psu addto ugroup all-hera hermes:u2@osm")); + ci.command(new Args("psu addto ugroup all-hera herab:u1@osm")); + ci.command(new Args("psu addto ugroup all-hera herab:u2@osm")); + + // network + ci.command(new Args("psu create unit -net 131.169.0.0/255.255.0.0")); + + ci.command(new Args("psu create unit -net 131.169.214.109/231.169.214.106")); + ci.command(new Args("psu create unit -net 0.0.0.0/0.0.0.0")); + ci.command(new Args("psu create unit -net 2001:638:700::0/48")); + ci.command(new Args("psu create unit -net ::/0")); + + // net groups + ci.command(new Args("psu create ugroup intern")); + ci.command(new Args("psu create ugroup extern")); + + // populate net groups + ci.command(new Args("psu addto ugroup intern 131.169.0.0/255.255.0.0")); + ci.command(new Args("psu addto ugroup extern 0.0.0.0/0.0.0.0")); + ci.command(new Args("psu addto ugroup intern 2001:638:700::0/48")); + ci.command(new Args("psu addto ugroup extern ::/0")); + ci.command(new Args("psu addto ugroup intern 131.169.214.109/231.169.214.106")); + + // pools + ci.command(new Args("psu 
create pool h1-read")); + psu.getPool("h1-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool h1-write")); + psu.getPool("h1-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + ci.command(new Args("psu create pool zeus-read")); + psu.getPool("zeus-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool zeus-write")); + psu.getPool("zeus-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + ci.command(new Args("psu create pool flc-read")); + psu.getPool("flc-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool flc-write")); + psu.getPool("flc-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + ci.command(new Args("psu create pool hermes-read")); + psu.getPool("hermes-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool hermes-write")); + psu.getPool("hermes-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + ci.command(new Args("psu create pool herab-read")); + psu.getPool("herab-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool herab-write")); + psu.getPool("herab-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + ci.command(new Args("psu create pool default-read")); + psu.getPool("default-read").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + ci.command(new Args("psu create pool default-write")); + psu.getPool("default-write").setPoolMode(new PoolV2Mode(PoolV2Mode.ENABLED)); + + // pool groups + + ci.command(new Args("psu create pgroup h1-read-pools")); + ci.command(new Args("psu create pgroup h1-write-pools")); + + ci.command(new Args("psu create pgroup zeus-read-pools")); + ci.command(new Args("psu create pgroup zeus-write-pools")); + + ci.command(new Args("psu create pgroup flc-read-pools")); + ci.command(new Args("psu create pgroup flc-write-pools")); + + ci.command(new Args("psu create pgroup hermes-read-pools")); + ci.command(new Args("psu create pgroup hermes-write-pools")); + + ci.command(new Args("psu create pgroup herab-read-pools")); + ci.command(new Args("psu create pgroup herab-write-pools")); + + ci.command(new Args("psu create pgroup default-read-pools")); + ci.command(new Args("psu create pgroup default-write-pools")); + + // Populate pool groups + + ci.command(new Args("psu addto pgroup h1-read-pools h1-read")); + ci.command(new Args("psu addto pgroup h1-write-pools h1-write")); + + ci.command(new Args("psu addto pgroup zeus-read-pools zeus-read")); + ci.command(new Args("psu addto pgroup zeus-write-pools zeus-write")); + + ci.command(new Args("psu addto pgroup flc-read-pools flc-read")); + ci.command(new Args("psu addto pgroup flc-write-pools flc-write")); + + ci.command(new Args("psu addto pgroup hermes-read-pools hermes-read")); + ci.command(new Args("psu addto pgroup hermes-write-pools hermes-write")); + + ci.command(new Args("psu addto pgroup herab-read-pools herab-read")); + ci.command(new Args("psu addto pgroup herab-write-pools herab-write")); + + ci.command(new Args("psu addto pgroup default-read-pools default-read")); + ci.command(new Args("psu addto pgroup default-write-pools default-write")); + + // links + + ci.command(new Args("psu create link h1-read-link all-h1 intern")); + ci.command(new Args("psu create link h1-write-link all-h1 intern")); + + ci.command(new Args("psu create link zeus-read-link all-zeus intern")); + ci.command(new Args("psu create link zeus-write-link all-zeus intern")); + + 
ci.command(new Args("psu create link flc-read-link all-flc intern")); + ci.command(new Args("psu create link flc-write-link all-flc intern")); + + ci.command(new Args("psu create link hermes-read-link all-hermes intern")); + ci.command(new Args("psu create link hermes-write-link all-hermes intern")); + + ci.command(new Args("psu create link herab-read-link all-herab intern")); + ci.command(new Args("psu create link herab-write-link all-herab intern")); + + ci.command(new Args("psu create link default-read-link-in all intern")); + ci.command(new Args("psu create link default-write-link-in all intern")); + + ci.command(new Args("psu create link default-read-link-ex all extern")); + ci.command(new Args("psu create link default-write-link-ex all extern")); + + + ci.command(new Args( + "psu set link h1-read-link -readpref=20 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link zeus-read-link -readpref=20 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link flc-read-link -readpref=20 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link hermes-read-link -readpref=20 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link herab-read-link -readpref=20 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link default-read-link-in -readpref=1 -writepref=0 -cachepref=20")); + ci.command(new Args( + "psu set link default-read-link-ex -readpref=1 -writepref=0 -cachepref=20")); + + ci.command(new Args( + "psu set link h1-write-link -writepref=20 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link zeus-write-link -writepref=20 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link flc-write-link -writepref=20 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link hermes-write-link -writepref=20 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link herab-write-link -writepref=20 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link default-write-link-in -writepref=1 -readpref=0 -cachepref=0")); + ci.command(new Args( + "psu set link default-write-link-ex -writepref=1 -readpref=0 -cachepref=0")); + + // assign pool groups to links + ci.command(new Args("psu addto link h1-read-link h1-read-pools")); + ci.command(new Args("psu addto link h1-write-link h1-write-pools")); + + ci.command(new Args("psu addto link zeus-read-link zeus-read-pools")); + ci.command(new Args("psu addto link zeus-write-link zeus-write-pools")); + + ci.command(new Args("psu addto link flc-read-link flc-read-pools")); + ci.command(new Args("psu addto link flc-write-link flc-write-pools")); + + ci.command(new Args("psu addto link hermes-read-link hermes-read-pools")); + ci.command(new Args("psu addto link hermes-write-link hermes-write-pools")); + + ci.command(new Args("psu addto link herab-read-link herab-read-pools")); + ci.command(new Args("psu addto link herab-write-link herab-write-pools")); + + ci.command(new Args("psu addto link default-read-link-ex default-read-pools")); + ci.command(new Args("psu addto link default-write-link-ex default-write-pools")); + + ci.command(new Args("psu addto link default-read-link-in default-read-pools")); + ci.command(new Args("psu addto link default-write-link-in default-write-pools")); + + ci.command("psu set allpoolsactive on"); + + } + + /* + * test case: check that write with unknow storage group goes only to default-write pool + */ + @Benchmark + @Threads(value = 16) + public int matchCaching() { + + psu.setCachingEnabeled(true); + String address = 
adresses[ThreadLocalRandom.current().nextInt(adresses.length)];
+        PoolPreferenceLevel[] preference = psu.match(
+              DirectionType.WRITE, // operation
+              address, // net unit
+              null, // protocol
+              fileAttributes,
+              null, // linkGroup
+              excludeNoPools);
+        return preference.length;
+    }
+
+
+    public static void main(String[] args) throws RunnerException {
+        Options opt = new OptionsBuilder()
+              .include(PoolSelectionUnitBenchmarkCaching.class.getSimpleName())
+              .build();
+
+        new Runner(opt).run();
+    }
+
+}
diff --git a/modules/dcache/src/main/java/diskCacheV111/poolManager/PoolSelectionUnitV2.java b/modules/dcache/src/main/java/diskCacheV111/poolManager/PoolSelectionUnitV2.java
index e4625d773d1..6d276daec59 100644
--- a/modules/dcache/src/main/java/diskCacheV111/poolManager/PoolSelectionUnitV2.java
+++ b/modules/dcache/src/main/java/diskCacheV111/poolManager/PoolSelectionUnitV2.java
@@ -15,6 +15,8 @@
 import com.google.common.base.Predicates;
 import com.google.common.base.Splitter;
 import com.google.common.base.Strings;
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
 import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Lists;
 import com.google.common.collect.Maps;
@@ -94,8 +96,13 @@ public String getVersion() {
     private final Map _linkGroups = new HashMap<>();
     private final Map _uGroups = new HashMap<>();
     private final Map _units = new HashMap<>();
+    private final Cache<String, PoolPreferenceLevel[]> cachedMatchValue =
+          CacheBuilder.newBuilder()
+                .maximumSize(100000)
+                .build();
     private boolean _useRegex;
     private boolean _allPoolsActive;
+    public boolean _cachingEnabeled;
 
     /**
      * Ok, this is the critical part of PoolManager, but (!!!) the whole select path is READ-ONLY,
@@ -108,9 +115,13 @@ public String getVersion() {
     private final Lock _psuWriteLock = _psuReadWriteLock.writeLock();
 
     private final NetHandler _netHandler = new NetHandler();
-
     private transient PnfsHandler _pnfsHandler;
 
+    public void setCachingEnabeled(boolean cachingEnabeled) {
+        _cachingEnabeled = cachingEnabeled;
+    }
+
+
     @Override
     public Map getLinks() {
         rlock();
@@ -624,6 +635,8 @@ public PoolPreferenceLevel[] match(DirectionType type, String netUnitName,
         String hsm = storageInfo.getHsm();
         String dCacheUnitName = storageInfo.getCacheClass();
 
+
+
         /*
          * The preference level build requires these to be present in the file attributes.
         */
@@ -642,12 +655,43 @@ public PoolPreferenceLevel[] match(DirectionType type, String netUnitName,
 
         String storeUnitName = storageClass + "@" + hsm;
         Map variableMap = storageInfo.getMap();
+        String netUnitGroup = null;
 
         LOGGER.debug(
               "running match: type={} store={} dCacheUnit={} net={} protocol={} keys={} locations={} linkGroup={}",
               type, storeUnitName, dCacheUnitName, netUnitName, protocolUnitName, variableMap,
               storageInfo.locations(), linkGroupName);
 
+
+
+        String cacheKey = null;
+        if (_cachingEnabeled) {
+
+            try {
+                Unit unit = _netHandler.match(netUnitName);
+                netUnitGroup = unit._uGroupList.values()
+                      .stream()
+                      .map(UGroup::getName).findFirst().orElse(null);
+
+                LOGGER.debug("This IP address belongs to net unit {} in ugroup {}", netUnitName, netUnitGroup);
+
+
+            } catch (UnknownHostException e) {
+                LOGGER.error("Caching did not work, please check the configuration: {}", e.getMessage());
+            }
+
+            cacheKey = type.toString() + storeUnitName + dCacheUnitName
+                  + netUnitGroup + protocolUnitName + linkGroupName;
+
+            PoolPreferenceLevel[] cachedMatchValueTmp = cachedMatchValue.getIfPresent(cacheKey);
+            if (cachedMatchValueTmp != null) {
+                return cachedMatchValueTmp;
+
+            }
+        }
+
         PoolPreferenceLevel[] result = null;
         rlock();
         try {
@@ -670,7 +714,9 @@ public PoolPreferenceLevel[] match(DirectionType type, String netUnitName,
         if (LOGGER.isDebugEnabled()) {
             logResult(result);
         }
-
+        if (_cachingEnabeled) {
+            cachedMatchValue.put(cacheKey, result);
+        }
         return result;
     }
 
@@ -2600,7 +2646,11 @@ private String poolCountDescriptionFor(int count) {
     }
 
     protected void wlock() {
+
         _psuWriteLock.lock();
+        if (_cachingEnabeled) {
+            cachedMatchValue.invalidateAll();
+        }
     }
 
     protected void wunlock() {
diff --git a/modules/dcache/src/main/resources/diskCacheV111/poolManager/poolmanager.xml b/modules/dcache/src/main/resources/diskCacheV111/poolManager/poolmanager.xml
index f3ff9b959f5..f32c2563f5d 100644
--- a/modules/dcache/src/main/resources/diskCacheV111/poolManager/poolmanager.xml
+++ b/modules/dcache/src/main/resources/diskCacheV111/poolManager/poolmanager.xml
@@ -34,6 +34,7 @@
     Pool selection unit
+
diff --git a/modules/dcache/src/test/java/org/dcache/tests/poolmanager/PoolSelectionUnitTest.java b/modules/dcache/src/test/java/org/dcache/tests/poolmanager/PoolSelectionUnitTest.java
index 6271e35b1d3..942741d1af6 100644
--- a/modules/dcache/src/test/java/org/dcache/tests/poolmanager/PoolSelectionUnitTest.java
+++ b/modules/dcache/src/test/java/org/dcache/tests/poolmanager/PoolSelectionUnitTest.java
@@ -327,6 +327,35 @@ public void testAnyRead() throws CommandException {
 
     }
 
+    @Test
+    public void testTheSameCached() throws CommandException {
+
+        _ci.command("psu set allpoolsactive on");
+        FileAttributes fileAttributes = new FileAttributes();
+        StorageInfos.injectInto(GenericStorageInfo.valueOf("*", "*"), fileAttributes);
+
+        _psu._cachingEnabeled = true;
+
+        PoolPreferenceLevel[] preferenceRes1 = _psu.match(
+              DirectionType.READ, // operation
+              "131.169.214.149", // net unit
+              null, // protocol
+              fileAttributes,
+              null, // linkGroup
+              defaultExclude);
+
+        PoolPreferenceLevel[] preferenceRes2 = _psu.match(
+              DirectionType.READ, // operation
+              "131.169.214.149", // net unit
+              null, // protocol
+              fileAttributes,
+              null, // linkGroup
+              defaultExclude);
+
+        assertTrue(preferenceRes1 == preferenceRes2); // same instance expected from the cache, not merely equal
+
+    }
+
     /*
      * test case: check that write with unknow storage group goes only to default-write pool
      */
diff --git a/skel/share/defaults/poolmanager.properties
b/skel/share/defaults/poolmanager.properties
index 7c2ae5a5569..91eaeffb220 100644
--- a/skel/share/defaults/poolmanager.properties
+++ b/skel/share/defaults/poolmanager.properties
@@ -114,3 +114,11 @@ poolmanager.request-notifier.timeout=1
 (obsolete)poolmanager.cell.export = See poolmanager.cell.consume
 (forbidden)poolmanager.plugins.selection-unit =
 (forbidden)poolmanager.plugins.quota-manager =
+
+#
+#  This property optimises pool selection based on the observation that the next request
+#  for pool selection is very likely to arrive with the same selection parameters as the
+#  previous one.  Depending on the setup, the caching of the selected pools can be
+#  switched on or off with this property.
+
+(one-of?true|false)poolmanager.selection.unit.cachingenabeled = false
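For completeness, an example of how an admin would switch the cache on, assuming the usual
dCache mechanism of overriding defaults in /etc/dcache/dcache.conf or in a layout file
(the value shown is not the default):

    # enable caching of pool selection results (the default, defined above, is false)
    poolmanager.selection.unit.cachingenabeled = true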